Dataset columns:

column                 type           min      max
entry_point            string length  1        65
original_triton_code   string length  4.5k     619k
python_code            string length  208      60.9k
triton_code            string length  1.15k    275k
repo_name              string length  7        115
module_name            string length  1        65
synthetic              bool           1 class
uuid                   int64          0        18.5k
licenses               list length    1        6
stars                  int64          0        19.8k
sha                    string length  40       40
repo_link              string length  72       180
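The records that follow are rows of this dataset, one field per labeled block. A minimal loading sketch with the Hugging Face datasets library; the repository path is a hypothetical placeholder, since this extract does not name the dataset:

from datasets import load_dataset

# "<namespace>/<dataset>" is a hypothetical placeholder; the extract does
# not give the dataset's published name.
ds = load_dataset("<namespace>/<dataset>", split="train")
row = ds[0]
print(row["entry_point"], row["repo_name"], row["stars"])
print(row["python_code"][:200])  # eager PyTorch module source for the row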
entry_point: Decoder

original_triton_code:
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/pr/cprthrqz6iotcmrjfcrj7taqntzxisdcjtr54gsuz2ck2kf6kbsr.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_1 => relu # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_6, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 
= tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = 0.0 tmp7 = tmp5 <= tmp6 tl.store(in_out_ptr0 + (x0), tmp5, xmask) tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3f/c3fojdylt3acgtptumtowi32d26uvrqtyzefg7ta476ezqj5dtyp.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.view] # Source node to ATen node mapping: # out_2 => view_7 # Graph fragment: # %view_7 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_6, [64, 1]), kwargs = {}) triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*((x0 % 4) // 4)) + (16*(((4*((x0 // 4) % 4)) + (x0 % 4)) // 16))), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ja/cjav5bl6r5w6bg5s2wofxugsfwzslejkqeoc3ia5g7rbnsj7egs6.py # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_3 => relu_1 # Graph fragment: # %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_8,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_13, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( 
size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr0 + (x4), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/h4/ch4a5wnmo3unffqbnkhtgycbtjswoyflrr2s3gvx5gv43b7vybxg.py # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.view] # Source node to ATen node mapping: # out_4 => view_14 # Graph fragment: # %view_14 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_13, [64, 2]), kwargs = {}) triton_poi_fused_view_3 = async_compile.triton('triton_poi_fused_view_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): 
xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (2*x1) + (8*((x1 % 4) // 4)) + (32*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6l/c6lsk2d35jkl6q4phvih4qzvl4j5us3xs2klgp5wcgxzr53nypop.py # Topologically Sorted Source Nodes: [w], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # w => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_15,), kwargs = {}) triton_poi_fused_sigmoid_4 = async_compile.triton('triton_poi_fused_sigmoid_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (2, 1), (1, 1)) assert_size_stride(primals_5, (2, ), (1, )) assert_size_stride(primals_6, (4, 2), (2, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf0 # reuse buf9 = 
empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf9, 64, grid=grid(64), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.view] triton_poi_fused_view_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) del buf1 buf3 = empty_strided_cuda((64, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (1, 2), (1, 1), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf3 # reuse buf8 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool) # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_2.run(buf4, primals_5, buf8, 128, grid=grid(128), stream=stream0) del primals_5 buf5 = empty_strided_cuda((64, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.view] triton_poi_fused_view_3.run(buf4, buf5, 128, grid=grid(128), stream=stream0) del buf4 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (2, 4), (1, 2), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [w], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_4.run(buf7, primals_7, 256, grid=grid(256), stream=stream0) del primals_7 return (buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf5, buf7, primals_6, buf8, primals_4, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((2, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 2), (2, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
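For orientation, the fused relu/threshold_backward kernel above computes, in eager terms, the forward ReLU plus the boolean mask that autograd's threshold_backward consumes. A minimal sketch, not part of the row:

import torch

# Sketch of what triton_poi_fused_relu_threshold_backward_0 produces:
# the ReLU'd activations (written back in place over the mm output) and
# the <= 0 mask saved for the backward pass.
x = torch.randn(64, 1)        # mm output, as in buf0 above
bias = torch.randn(1)         # linear1 bias (primals_2)
out = torch.relu(x + bias)    # stored to in_out_ptr0
grad_mask = out <= 0          # stored to out_ptr0 (torch.bool)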
python_code:

import torch
import torch.nn as nn


class Decoder(nn.Module):

    def __init__(self, latent_size, out_size):
        super().__init__()
        self.linear1 = nn.Linear(latent_size, int(out_size / 4))
        self.linear2 = nn.Linear(int(out_size / 4), int(out_size / 2))
        self.linear3 = nn.Linear(int(out_size / 2), out_size)
        self.relu = nn.ReLU(True)
        self.sigmoid = nn.Sigmoid()

    def forward(self, z):
        out = self.linear1(z)
        out = self.relu(out)
        out = self.linear2(out)
        out = self.relu(out)
        out = self.linear3(out)
        w = self.sigmoid(out)
        return w


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'latent_size': 4, 'out_size': 4}]

triton_code:
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = 0.0 tmp7 = tmp5 <= tmp6 tl.store(in_out_ptr0 + x0, tmp5, xmask) tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x0 % 4 // 4) + 16 * ((4 * (x0 // 4 % 4) + x0 % 4) // 16)), xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + x4, tmp6, xmask) @triton.jit def triton_poi_fused_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 2 * x1 + 8 * (x1 % 4 // 4) + 32 * ((4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_sigmoid_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (2, 1), (1, 1)) assert_size_stride(primals_5, (2,), (1,)) assert_size_stride(primals_6, (4, 2), (2, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf0 buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1, primals_2, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) triton_poi_fused_view_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 buf3 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (1, 2), (1, 1 ), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 2), (32, 8, 2, 1), 0) del buf3 buf8 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(128)](buf4, primals_5, buf8, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((64, 2), (2, 1), torch.float32) triton_poi_fused_view_3[grid(128)](buf4, buf5, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf4 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (2, 4), (1, 2 ), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused_sigmoid_4[grid(256)](buf7, primals_7, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_7 return buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf2, buf5, buf7, primals_6, buf8, primals_4, buf9 class DecoderNew(nn.Module): def __init__(self, latent_size, out_size): super().__init__() self.linear1 = nn.Linear(latent_size, int(out_size / 4)) self.linear2 = nn.Linear(int(out_size / 4), int(out_size / 2)) self.linear3 = nn.Linear(int(out_size / 2), out_size) self.relu = nn.ReLU(True) self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_6 = self.linear3.weight primals_7 = self.linear3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
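Rows like this can be sanity-checked by running the eager module and the generated module side by side on the row's get_inputs(). A minimal sketch, assuming a CUDA device, that both class definitions above have been executed, and an arbitrary tolerance:

import torch

# Compare eager Decoder with the generated DecoderNew on shared weights
# (assumes CUDA; Decoder and DecoderNew defined as above).
torch.manual_seed(0)
eager = Decoder(latent_size=4, out_size=4).cuda()
compiled = DecoderNew(latent_size=4, out_size=4).cuda()
compiled.load_state_dict(eager.state_dict())  # same linear1..3 parameters
x = torch.rand([4, 4, 4, 4], device='cuda')
torch.testing.assert_close(eager(x), compiled(x), rtol=1e-4, atol=1e-4)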
repo_name: finloop/usad
module_name: Decoder
synthetic: false
uuid: 15,350
licenses: [ "BSD-3-Clause" ]
stars: 65
sha: 5e1bf326af5f1325fa4676a2de978cae6db0481c
repo_link: https://github.com/finloop/usad/tree/5e1bf326af5f1325fa4676a2de978cae6db0481c
entry_point: BasicBlock

original_triton_code:
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/fv/cfvfxpmpur3qlmurffwz4u56tgvw75i4lbjvzd25ortunbobyxnh.py # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten._native_batch_norm_legit, aten.relu] # Source node to ATen node mapping: # out_1 => add, rsqrt, var_mean # out_2 => relu # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) triton_per_fused__native_batch_norm_legit_relu_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_relu_0(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp24 = tl.full([1, 1], 0, tl.int32) tmp25 = triton_helpers.maximum(tmp24, tmp23) tl.store(out_ptr2 + (r1 + (16*x0)), tmp25, xmask) tl.store(out_ptr3 + (x0), tmp22, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/lk/clk53igo2wowtd6pq5zu23svybsi67ef5dmegrb3qnpketcev22z.py # Topologically Sorted Source Nodes: [out_4, out_6], Original ATen: [aten._native_batch_norm_legit, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_4 => add_1, rsqrt_1, var_mean_1 # out_6 => relu_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_5, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_8,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_16, 0), kwargs = {}) triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 + tmp24 tmp26 = tl.full([1, 1], 0, tl.int32) tmp27 = triton_helpers.maximum(tmp26, tmp25) tmp28 = 0.0 tmp29 = tmp27 <= tmp28 tl.store(out_ptr2 + (r1 + (16*x0)), tmp27, xmask) tl.store(out_ptr3 + (r1 + (16*x0)), tmp29, xmask) tl.store(out_ptr4 + (x0), tmp22, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten._native_batch_norm_legit, aten.relu] stream0 = get_raw_stream(0) triton_per_fused__native_batch_norm_legit_relu_0.run(buf0, buf1, buf5, buf4, 16, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf11 = 
empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [out_4, out_6], Original ATen: [aten._native_batch_norm_legit, aten.relu, aten.threshold_backward] triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1.run(buf6, primals_1, buf7, buf11, buf12, buf10, 16, 16, grid=grid(16), stream=stream0) return (buf11, primals_1, primals_2, primals_3, buf0, reinterpret_tensor(buf4, (16, ), (1, ), 0), buf5, buf6, reinterpret_tensor(buf10, (16, ), (1, ), 0), buf12, reinterpret_tensor(buf7, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
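The fused _native_batch_norm_legit + relu kernel above is an affine-free instance norm over each 4x4 spatial plane followed by ReLU. In eager terms, a sketch (not part of the row):

import torch

# Per-(N, C) statistics over the 16 spatial elements, biased variance,
# eps = 1e-05, then ReLU -- what the persistent-reduction kernel stores
# to out_ptr2 (plus the mean and rsqrt saved for the backward pass).
x = torch.randn(4, 4, 4, 4)
mean = x.mean(dim=(2, 3), keepdim=True)
var = x.var(dim=(2, 3), unbiased=False, keepdim=True)
out = torch.relu((x - mean) * torch.rsqrt(var + 1e-05))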
python_code:

import torch
import torch.nn as nn
import torch.utils.data


def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
        bias=False)


def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
        padding=dilation, groups=groups, bias=False, dilation=dilation)


class BasicBlock(nn.Module):
    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None,
        groups=1, base_width=64, dilation=1, norm_model='instance'):
        super(BasicBlock, self).__init__()
        if 'instance' == norm_model:
            norm_layer = nn.InstanceNorm2d
        else:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError(
                'BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError(
                'Dilation > 1 not supported in BasicBlock')
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.stride = stride
        if stride != 1 or inplanes != planes:
            self.downsample = nn.Sequential(conv1x1(inplanes, planes,
                stride), norm_layer(planes))
        else:
            self.downsample = downsample

    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'inplanes': 4, 'planes': 4}]

triton_code:
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__native_batch_norm_legit_relu_0(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp24 = tl.full([1, 1], 0, tl.int32) tmp25 = triton_helpers.maximum(tmp24, tmp23) tl.store(out_ptr2 + (r1 + 16 * x0), tmp25, xmask) tl.store(out_ptr3 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1(in_ptr0 , in_ptr1, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 + tmp24 tmp26 = tl.full([1, 1], 0, tl.int32) tmp27 = triton_helpers.maximum(tmp26, tmp25) tmp28 = 0.0 tmp29 = tmp27 <= tmp28 tl.store(out_ptr2 + (r1 + 16 * x0), tmp27, xmask) tl.store(out_ptr3 + (r1 + 16 * x0), tmp29, xmask) tl.store(out_ptr4 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) get_raw_stream(0) triton_per_fused__native_batch_norm_legit_relu_0[grid(16)](buf0, buf1, buf5, buf4, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1[ grid(16)](buf6, primals_1, buf7, buf11, buf12, buf10, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) return buf11, primals_1, primals_2, primals_3, buf0, reinterpret_tensor( buf4, (16,), (1,), 0), buf5, buf6, reinterpret_tensor(buf10, (16,), (1,), 0), buf12, reinterpret_tensor(buf7, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0) def conv1x1(in_planes, out_planes, stride=1): """1x1 convolution""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation) class BasicBlockNew(nn.Module): expansion = 1 __constants__ = ['downsample'] def __init__(self, inplanes, planes, stride=1, downsample=None, groups= 1, base_width=64, dilation=1, norm_model='instance'): super(BasicBlockNew, self).__init__() if 'instance' == norm_model: norm_layer = nn.InstanceNorm2d else: norm_layer = nn.BatchNorm2d if groups != 1 or base_width != 64: raise ValueError( 'BasicBlock only supports groups=1 and base_width=64') if dilation > 1: raise NotImplementedError( 'Dilation > 1 not supported in BasicBlock') self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = norm_layer(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = norm_layer(planes) self.stride = stride if stride != 1 or inplanes != planes: self.downsample = nn.Sequential(conv1x1(inplanes, planes, stride), norm_layer(planes)) else: self.downsample = downsample def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv2.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
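As with the other rows, the generated block can be checked against the eager one. Only the two conv weights are live parameters here: norm_model='instance' selects affine-free InstanceNorm2d, and with stride=1 and inplanes == planes there is no downsample branch. A minimal sketch, assuming CUDA and an assumed tolerance:

import torch

# Compare eager BasicBlock with the generated BasicBlockNew on shared
# weights (assumes both classes above are defined and CUDA is available).
torch.manual_seed(0)
eager = BasicBlock(inplanes=4, planes=4).cuda()
compiled = BasicBlockNew(inplanes=4, planes=4).cuda()
compiled.load_state_dict(eager.state_dict())  # conv1/conv2 weights only
x = torch.rand([4, 4, 4, 4], device='cuda')
torch.testing.assert_close(eager(x), compiled(x), rtol=1e-4, atol=1e-4)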
repo_name: ferodia/MichiGAN
module_name: BasicBlock
synthetic: false
uuid: 15,351
licenses: [ "MIT" ]
stars: 235
sha: a49acb49f9659d7538e62faa3ed08e46afb0ddae
repo_link: https://github.com/ferodia/MichiGAN/tree/a49acb49f9659d7538e62faa3ed08e46afb0ddae
entry_point: Attention

original_triton_code:
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/r6/cr6neze6yovkog6kjrk5k2db63h47ozkojywfys6karxe7dlumrz.py # Topologically Sorted Source Nodes: [score_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # score_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex 
x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py # Topologically Sorted Source Nodes: [score_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # score_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 4), (16, 4, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 4), (16, 4, 4, 1)) 
assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_3 del primals_4 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_5 del primals_6 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [score_1], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [score_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(buf4, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf5) buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, reinterpret_tensor(buf5, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_8 return (reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0), buf4, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), buf4, reinterpret_tensor(buf5, (16, 4), (4, 1), 0), primals_7, reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 1, 4), (16, 4, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1, 4), (16, 4, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import 
compiled_module_main compiled_module_main('None', benchmark_compiled_module)
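The two softmax kernels above split F.softmax(score, dim=-1) into the usual numerically stable pair: exp(x - rowmax), then division by the row sum. In eager terms, a sketch:

import torch

# Kernel 0: exp(score - rowmax); kernel 1: divide by the row sum.
score = torch.randn(4, 4, 4)
shifted = (score - score.amax(dim=-1, keepdim=True)).exp()
softmax = shifted / shifted.sum(dim=-1, keepdim=True)
assert torch.allclose(softmax, torch.softmax(score, dim=-1))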
python_code:

import math
import torch
import torch.nn.functional as F
import torch.nn as nn


class Attention(nn.Module):

    def __init__(self, embed_dim, hidden_dim=None, out_dim=None, n_head=1,
        score_function='dot_product', dropout=0):
        """ Attention Mechanism
        :param embed_dim:
        :param hidden_dim:
        :param out_dim:
        :param n_head: num of head (Multi-Head Attention)
        :param score_function: scaled_dot_product / mlp (concat) / bi_linear (general dot)
        :return (?, q_len, out_dim,)
        """
        super(Attention, self).__init__()
        if hidden_dim is None:
            hidden_dim = embed_dim // n_head
        if out_dim is None:
            out_dim = embed_dim
        self.embed_dim = embed_dim
        self.hidden_dim = hidden_dim
        self.n_head = n_head
        self.score_function = score_function
        self.w_k = nn.Linear(embed_dim, n_head * hidden_dim)
        self.w_q = nn.Linear(embed_dim, n_head * hidden_dim)
        self.proj = nn.Linear(n_head * hidden_dim, out_dim)
        self.dropout = nn.Dropout(dropout)
        if score_function == 'mlp':
            self.weight = nn.Parameter(torch.Tensor(hidden_dim * 2))
        elif self.score_function == 'bi_linear':
            self.weight = nn.Parameter(torch.Tensor(hidden_dim, hidden_dim))
        else:
            self.register_parameter('weight', None)
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.hidden_dim)
        if self.weight is not None:
            self.weight.data.uniform_(-stdv, stdv)

    def forward(self, k, q):
        if len(q.shape) == 2:
            q = torch.unsqueeze(q, dim=1)
        if len(k.shape) == 2:
            k = torch.unsqueeze(k, dim=1)
        mb_size = k.shape[0]
        k_len = k.shape[1]
        q_len = q.shape[1]
        kx = self.w_k(k).view(mb_size, k_len, self.n_head, self.hidden_dim)
        kx = kx.permute(2, 0, 1, 3).contiguous().view(-1, k_len, self.
            hidden_dim)
        qx = self.w_q(q).view(mb_size, q_len, self.n_head, self.hidden_dim)
        qx = qx.permute(2, 0, 1, 3).contiguous().view(-1, q_len, self.
            hidden_dim)
        if self.score_function == 'dot_product':
            kt = kx.permute(0, 2, 1)
            score = torch.bmm(qx, kt)
        elif self.score_function == 'scaled_dot_product':
            kt = kx.permute(0, 2, 1)
            qkt = torch.bmm(qx, kt)
            score = torch.div(qkt, math.sqrt(self.hidden_dim))
        elif self.score_function == 'mlp':
            kxx = torch.unsqueeze(kx, dim=1).expand(-1, q_len, -1, -1)
            qxx = torch.unsqueeze(qx, dim=2).expand(-1, -1, k_len, -1)
            kq = torch.cat((kxx, qxx), dim=-1)
            score = torch.tanh(torch.matmul(kq, self.weight))
        elif self.score_function == 'bi_linear':
            qw = torch.matmul(qx, self.weight)
            kt = kx.permute(0, 2, 1)
            score = torch.bmm(qw, kt)
        else:
            raise RuntimeError('invalid score_function')
        score = F.softmax(score, dim=-1)
        output = torch.bmm(score, kx)
        output = torch.cat(torch.split(output, mb_size, dim=0), dim=-1)
        output = self.proj(output)
        output = self.dropout(output)
        return output, score


def get_inputs():
    return [torch.rand([4, 4, 1, 4]), torch.rand([4, 4, 1, 4])]


def get_init_inputs():
    return [[], {'embed_dim': 4}]

triton_code:
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 4), (16, 4, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 4), (16, 4, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_3 del primals_4 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_5 del primals_6 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = buf2 del buf2 triton_poi_fused__softmax_1[grid(64)](buf3, buf4, 
64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = buf3 del buf3 extern_kernels.bmm(buf4, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf5) buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, reinterpret_tensor(buf5, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_8 return reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0 ), buf4, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0 ), buf4, reinterpret_tensor(buf5, (16, 4), (4, 1), 0 ), primals_7, reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0) class AttentionNew(nn.Module): def __init__(self, embed_dim, hidden_dim=None, out_dim=None, n_head=1, score_function='dot_product', dropout=0): """ Attention Mechanism :param embed_dim: :param hidden_dim: :param out_dim: :param n_head: num of head (Multi-Head Attention) :param score_function: scaled_dot_product / mlp (concat) / bi_linear (general dot) :return (?, q_len, out_dim,) """ super(AttentionNew, self).__init__() if hidden_dim is None: hidden_dim = embed_dim // n_head if out_dim is None: out_dim = embed_dim self.embed_dim = embed_dim self.hidden_dim = hidden_dim self.n_head = n_head self.score_function = score_function self.w_k = nn.Linear(embed_dim, n_head * hidden_dim) self.w_q = nn.Linear(embed_dim, n_head * hidden_dim) self.proj = nn.Linear(n_head * hidden_dim, out_dim) self.dropout = nn.Dropout(dropout) if score_function == 'mlp': self.weight = nn.Parameter(torch.Tensor(hidden_dim * 2)) elif self.score_function == 'bi_linear': self.weight = nn.Parameter(torch.Tensor(hidden_dim, hidden_dim)) else: self.register_parameter('weight', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.hidden_dim) if self.weight is not None: self.weight.data.uniform_(-stdv, stdv) def forward(self, input_0, input_1): primals_3 = self.w_k.weight primals_4 = self.w_k.bias primals_5 = self.w_q.weight primals_6 = self.w_q.bias primals_7 = self.proj.weight primals_8 = self.proj.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0], output[1]
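# Hedged smoke test (requires CUDA): the generated call() hard-asserts the
# (4, 4, 1, 4) input shapes and uses out= extern kernels that do not support
# autograd, so it is run under no_grad. The input->primals pairing is read off
# call() above (primals_1 feeds w_q, primals_2 feeds w_k); treat any numeric
# comparison against the eager Attention as an assumption, and note it would
# also need the weights copied across with load_state_dict.
import torch

if torch.cuda.is_available():
    m = AttentionNew(embed_dim=4).cuda()
    a = torch.rand(4, 4, 1, 4, device='cuda')
    b = torch.rand(4, 4, 1, 4, device='cuda')
    with torch.no_grad():
        out, weights = m(a, b)
    print(out.shape, weights.shape)      # (4, 4, 4) and (4, 4, 4)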
fhamborg/NewsMTSC
Attention
false
15352
[ "MIT" ]
46
5a8f88d7fbb921090e984cc378b02d75524c1025
https://github.com/fhamborg/NewsMTSC/tree/5a8f88d7fbb921090e984cc378b02d75524c1025
Round
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/e2/ce2czs2tflqlivpi5afrpgtcbqy6cgblp3xiuu77i6bolvvp6emb.py # Topologically Sorted Source Nodes: [mul, input_1, round_1, output, truediv], Original ATen: [aten.mul, aten.clamp, aten.round, aten.div] # Source node to ATen node mapping: # input_1 => clamp_max, clamp_min # mul => mul # output => mul_1 # round_1 => round_1 # truediv => div # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 255.0), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul, 0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 255.0), kwargs = {}) # %round_1 : [num_users=1] = call_function[target=torch.ops.aten.round.default](args = (%clamp_max,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%round_1, 1.0), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, 255.0), kwargs = {}) triton_poi_fused_clamp_div_mul_round_0 = async_compile.triton('triton_poi_fused_clamp_div_mul_round_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_mul_round_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_div_mul_round_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 255.0 tmp2 = tmp0 * tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = triton_helpers.minimum(tmp4, tmp1) tmp6 = libdevice.nearbyint(tmp5) tmp7 = 1.0 tmp8 = tmp6 * tmp7 tmp9 = 0.00392156862745098 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, input_1, round_1, output, truediv], Original ATen: [aten.mul, aten.clamp, aten.round, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_clamp_div_mul_round_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch.nn as nn class Quant(torch.autograd.Function): @staticmethod def forward(ctx, input): input = torch.clamp(input, 0, 255.0) output = input.round() * 1.0 return output @staticmethod def backward(ctx, grad_output): return grad_output class Round(nn.Module): def __init__(self): super(Round, self).__init__() def forward(self, input, **kwargs): return Quant.apply(input * 255.0) / 255.0 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
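# Hedged sketch: Quant is a straight-through estimator -- backward() returns
# grad_output unchanged -- and the *255.0 / /255.0 factors cancel, so the
# end-to-end gradient of Round is ~1 everywhere even though round() itself is
# piecewise constant. The quantization error is at most 0.5/255 per element.
import torch

x = torch.rand(4, 4, 4, 4, requires_grad=True)
y = Round()(x)
y.sum().backward()
print(torch.allclose(x.grad, torch.ones_like(x)))   # True (up to fp rounding)
print((y - x).abs().max() <= 0.5 / 255 + 1e-7)      # True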
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_div_mul_round_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 255.0 tmp2 = tmp0 * tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = triton_helpers.minimum(tmp4, tmp1) tmp6 = libdevice.nearbyint(tmp5) tmp7 = 1.0 tmp8 = tmp6 * tmp7 tmp9 = 0.00392156862745098 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_div_mul_round_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class Quant(torch.autograd.Function): @staticmethod def forward(ctx, input): input = torch.clamp(input, 0, 255.0) output = input.round() * 1.0 return output @staticmethod def backward(ctx, grad_output): return grad_output class RoundNew(nn.Module): def __init__(self): super(RoundNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
felixcheng97/IICNet
Round
false
15353
[ "MIT" ]
50
2648d7148c01a03226128c24a285c4a52e2b5aa0
https://github.com/felixcheng97/IICNet/tree/2648d7148c01a03226128c24a285c4a52e2b5aa0
PadSameConv2d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/xs/cxs2a7zwcw5yxvn445xldhvii7772mtsthpxnfawxoahvyf3vtaj.py # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # pad => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%arg0_1, [1, 2, 1, 2], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 7) % 7 x0 = xindex % 7 x2 = (xindex // 49) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = 
tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(arg0_1, buf0, 784, grid=grid(784), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn.functional as F class PadSameConv2d(torch.nn.Module): def __init__(self, kernel_size, stride=1): """ Imitates padding_mode="same" from tensorflow. :param kernel_size: Kernelsize of the convolution, int or tuple/list :param stride: Stride of the convolution, int or tuple/list """ super().__init__() if isinstance(kernel_size, (tuple, list)): self.kernel_size_y = kernel_size[0] self.kernel_size_x = kernel_size[1] else: self.kernel_size_y = kernel_size self.kernel_size_x = kernel_size if isinstance(stride, (tuple, list)): self.stride_y = stride[0] self.stride_x = stride[1] else: self.stride_y = stride self.stride_x = stride def forward(self, x: 'torch.Tensor'): _, _, height, width = x.shape padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1 ) + self.kernel_size_y - height) / 2 padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1) + self.kernel_size_x - width) / 2 padding = [math.floor(padding_x), math.ceil(padding_x), math.floor( padding_y), math.ceil(padding_y)] return F.pad(input=x, pad=padding) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
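# Hedged sketch: for the toy config (kernel_size=4, stride=1, 4x4 input) the
# formula above gives padding_y = padding_x = (1*(ceil(4/1)-1) + 4 - 4)/2 = 1.5,
# floored/ceiled into the asymmetric pad [1, 2, 1, 2] that the Triton kernel
# bakes in -- hence the (4, 4, 7, 7) buffer. Chaining a stride-1, k=4 Conv2d
# afterwards restores the input's 4x4 spatial size, imitating padding="same".
import torch
import torch.nn as nn

pad = PadSameConv2d(kernel_size=4)
conv = nn.Conv2d(4, 4, kernel_size=4, stride=1)
x = torch.rand(4, 4, 4, 4)
print(pad(x).shape)         # torch.Size([4, 4, 7, 7])
print(conv(pad(x)).shape)   # torch.Size([4, 4, 4, 4])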
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 7 % 7 x0 = xindex % 7 x2 = xindex // 49 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp11, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(784)](arg0_1, buf0, 784, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class PadSameConv2dNew(torch.nn.Module): def __init__(self, kernel_size, stride=1): """ Imitates padding_mode="same" from tensorflow. :param kernel_size: Kernelsize of the convolution, int or tuple/list :param stride: Stride of the convolution, int or tuple/list """ super().__init__() if isinstance(kernel_size, (tuple, list)): self.kernel_size_y = kernel_size[0] self.kernel_size_x = kernel_size[1] else: self.kernel_size_y = kernel_size self.kernel_size_x = kernel_size if isinstance(stride, (tuple, list)): self.stride_y = stride[0] self.stride_x = stride[1] else: self.stride_y = stride self.stride_x = stride def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
fish258/MonoRec
PadSameConv2d
false
15354
[ "MIT" ]
388
c0612d2710802004cdd83205e63d0582de543c41
https://github.com/fish258/MonoRec/tree/c0612d2710802004cdd83205e63d0582de543c41
Encoder
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/w2/cw2tk4wuq2lqaw5ouidkubjzk6zrv7ajgyj5jyqr3yffoa5n5m2t.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_1 => relu # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_6, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 
= xindex % 2 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr0 + (x4), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/u4/cu4dcn33sgl2ahkb2jwcn4auehbaoshytxhcuib7v4wwlusyrimo.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.view] # Source node to ATen node mapping: # out_2 => view_7 # Graph fragment: # %view_7 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_6, [64, 2]), kwargs = {}) triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (2*x1) + (8*((x1 % 4) // 4)) + (32*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6m/c6mfjdig7ut35m7deswr3fle6h6lpdmwl6b6sgilwkylic2ojowj.py # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_3 => relu_1 # Graph fragment: # %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_8,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_13, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = 0.0 tmp7 = tmp5 <= tmp6 tl.store(in_out_ptr0 + (x0), tmp5, xmask) tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/36/c366mevtdl3dtf4g4py5tamkbxlhxdcnckbbfzzdmm7gsjksvc3c.py # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.view] # Source node to ATen node mapping: # out_4 => view_14 # Graph fragment: # %view_14 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_13, [64, 1]), kwargs = {}) triton_poi_fused_view_3 = async_compile.triton('triton_poi_fused_view_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*((x0 % 4) // 4)) + (16*(((4*((x0 // 4) % 4)) + (x0 % 4)) // 16))), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/rg/crglrlecx3e4jsnkmaqaqzsqzwziipxhqkyuuyuixoca5l4qszw7.py # Topologically Sorted Source Nodes: [z], Original ATen: [aten.relu, aten.view, aten.threshold_backward] # Source node to ATen node mapping: # z => relu_2, view_17 # Graph fragment: # %relu_2 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_15,), kwargs = {}) # %view_17 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%view_16, [4, 4, 4, 4]), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_22, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_view_4 = async_compile.triton('triton_poi_fused_relu_threshold_backward_view_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_view_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_view_4(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr1 + (x4), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (2, 4), (4, 1)) assert_size_stride(primals_2, (2, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 2), (2, 1)) 
assert_size_stride(primals_5, (1, ), (1, )) assert_size_stride(primals_6, (4, 1), (1, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf0 # reuse buf11 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf11, 128, grid=grid(128), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.view] triton_poi_fused_view_1.run(buf1, buf2, 128, grid=grid(128), stream=stream0) del buf1 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (2, 1), (1, 2), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf3 # reuse buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool) # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_2.run(buf4, primals_5, buf10, 64, grid=grid(64), stream=stream0) del primals_5 buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.view] triton_poi_fused_view_3.run(buf4, buf5, 64, grid=grid(64), stream=stream0) del buf4 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (1, 4), (1, 1), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [z], Original ATen: [aten.relu, aten.view, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_view_4.run(buf7, primals_7, buf8, buf9, 256, grid=grid(256), stream=stream0) del buf7 del primals_7 return (buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf5, buf9, primals_6, buf10, primals_4, buf11, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 2), (2, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from 
torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Encoder(nn.Module): def __init__(self, in_size, latent_size): super().__init__() self.linear1 = nn.Linear(in_size, int(in_size / 2)) self.linear2 = nn.Linear(int(in_size / 2), int(in_size / 4)) self.linear3 = nn.Linear(int(in_size / 4), latent_size) self.relu = nn.ReLU(True) def forward(self, w): out = self.linear1(w) out = self.relu(out) out = self.linear2(out) out = self.relu(out) out = self.linear3(out) z = self.relu(out) return z def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_size': 4, 'latent_size': 4}]
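# Hedged sketch: with in_size=4 and latent_size=4 the bottleneck widths are
# int(4/2)=2 and int(4/4)=1, i.e. Linear(4, 2) -> Linear(2, 1) -> Linear(1, 4).
# nn.Linear maps only the last dimension, so the (4, 4, 4, 4) toy input keeps
# its shape end to end, and the final ReLU makes the latent non-negative.
import torch

enc = Encoder(in_size=4, latent_size=4)
z = enc(torch.rand(4, 4, 4, 4))
print(z.shape)                # torch.Size([4, 4, 4, 4])
print(bool((z >= 0).all()))   # True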
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + x4, tmp6, xmask) @triton.jit def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 2 * x1 + 8 * (x1 % 4 // 4) + 32 * ((4 * (x1 // 4 % 4) + x1 % 4) // 16)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = 0.0 tmp7 = tmp5 <= tmp6 tl.store(in_out_ptr0 + x0, tmp5, xmask) tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x0 % 4 // 4) + 16 * ((4 * (x0 // 4 % 4) + x0 % 4) // 16)), xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_view_4(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr1 + x4, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (2, 4), (4, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 2), (2, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (4, 1), (1, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32) 
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0) del buf0 buf11 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(128)](buf1, primals_2, buf11, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32) triton_poi_fused_view_1[grid(128)](buf1, buf2, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf1 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (2, 1), (1, 2 ), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf3 buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(64)](buf4, primals_5, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) triton_poi_fused_view_3[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf4 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (1, 4), (1, 1 ), 0), out=buf6) buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_view_4[grid(256)](buf7, primals_7, buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf7 del primals_7 return buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf2, buf5, buf9, primals_6, buf10, primals_4, buf11 class EncoderNew(nn.Module): def __init__(self, in_size, latent_size): super().__init__() self.linear1 = nn.Linear(in_size, int(in_size / 2)) self.linear2 = nn.Linear(int(in_size / 2), int(in_size / 4)) self.linear3 = nn.Linear(int(in_size / 4), latent_size) self.relu = nn.ReLU(True) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_6 = self.linear3.weight primals_7 = self.linear3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
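# Hedged check (requires CUDA): EncoderNew takes a single input, so pairing it
# with the eager Encoder above is unambiguous once the weights are shared via
# load_state_dict. Run under no_grad because the generated call() uses out=
# extern kernels, which do not support autograd; compare with allclose rather
# than equal, since kernel fusion may reorder floating-point operations.
import torch

if torch.cuda.is_available():
    ref = Encoder(4, 4).cuda()
    new = EncoderNew(4, 4).cuda()
    new.load_state_dict(ref.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        print(torch.allclose(ref(x), new(x), atol=1e-6))   # expected True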
finloop/usad
Encoder
false
15355
[ "BSD-3-Clause" ]
65
5e1bf326af5f1325fa4676a2de978cae6db0481c
https://github.com/finloop/usad/tree/5e1bf326af5f1325fa4676a2de978cae6db0481c
Offset
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/pb/cpbshtrpnaucbs7hqoiqkvsndkguzk4er52bxaovgoqcnlnyqv6v.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %primals_1), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del 
async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class Offset(nn.Module): def __init__(self, init_value=0.0): super(Offset, self).__init__() self.bias = nn.Parameter(torch.FloatTensor([init_value])) def forward(self, input): return input + self.bias def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
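# Hedged sketch: Offset holds a single learnable scalar that is broadcast-added
# to the whole input, so after out.sum().backward() its bias gradient is just
# the element count (4*4*4*4 = 256) -- one contribution per broadcast position.
import torch

off = Offset(init_value=0.5)
x = torch.rand(4, 4, 4, 4)
out = off(x)
out.sum().backward()
print(off.bias.grad)                  # tensor([256.])
print(torch.allclose(out, x + 0.5))   # True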
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, class OffsetNew(nn.Module): def __init__(self, init_value=0.0): super(OffsetNew, self).__init__() self.bias = nn.Parameter(torch.FloatTensor([init_value])) def forward(self, input_0): primals_1 = self.bias primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
flipson/dd3d
Offset
false
15356
[ "MIT" ]
227
86d8660c29612b79836dad9b6c39972ac2ca1557
https://github.com/flipson/dd3d/tree/86d8660c29612b79836dad9b6c39972ac2ca1557
GlobalSumPool2d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/zn/cznprkeuyccqrgaq2oqedmt6b75mnapuac3rarw7btkcaemyq7x5.py # Topologically Sorted Source Nodes: [sum_1], Original ATen: [aten.sum] # Source node to ATen node mapping: # sum_1 => sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [2, 3]), kwargs = {}) triton_per_fused_sum_0 = async_compile.triton('triton_per_fused_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 
(16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sum_1], Original ATen: [aten.sum] stream0 = get_raw_stream(0) triton_per_fused_sum_0.run(arg0_1, buf0, 16, 16, grid=grid(16), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
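A note on the generated kernel above: triton_per_fused_sum_0 is a persistent reduction. Each program instance owns XBLOCK of the 16 (batch, channel) rows and reduces all 16 spatial elements in one shot, which is why roffset stays at 0 and there is no loop over rnumel. The equivalent computation in plain PyTorch, as a sketch for orientation only (not part of the record):

import torch

x = torch.rand(4, 4, 4, 4)
# x0 in the kernel indexes the 16 (n, c) rows; r1 indexes the 16 spatial slots.
flat = x.reshape(16, 16)
out = flat.sum(dim=1).reshape(4, 4)  # one tl.sum per row, as in the kernel
assert torch.allclose(out, x.sum(dim=[2, 3]))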
import torch
import torch.nn as nn
import torch.utils.cpp_extension


class GlobalSumPool2d(nn.Module):

    def forward(self, x):
        return torch.sum(x, [2, 3])


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.cpp_extension assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_sum_0[grid(16)](arg0_1, buf0, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del arg0_1 return buf0, class GlobalSumPool2dNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
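Since the cleaned-up wrapper keeps the same call contract, a quick smoke test can compare it against the eager module. A minimal sketch, assuming a CUDA device and that GlobalSumPool2d and GlobalSumPool2dNew from this record are in scope:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
eager = GlobalSumPool2d()(x)        # torch.sum over dims [2, 3] -> shape (4, 4)
compiled = GlobalSumPool2dNew()(x)  # runs triton_per_fused_sum_0
assert eager.shape == compiled.shape == (4, 4)
assert torch.allclose(eager, compiled, atol=1e-6)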
STomoya/animeface
GlobalSumPool2d
false
15357
[ "MIT" ]
61
37b3cd26097d7874559d4c152e41e5712b7a1a42
https://github.com/STomoya/animeface/tree/37b3cd26097d7874559d4c152e41e5712b7a1a42
period_L2
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/wg/cwgxjhqowekqigbs3dtqc5m2wxxutwjf53bjkq2w7b572asvshzo.py # Topologically Sorted Source Nodes: [dt, sub_1, remainder, sub_2, loss, loss_1], Original ATen: [aten.sub, aten.remainder, aten.pow, aten.sum] # Source node to ATen node mapping: # dt => sub # loss => pow_1 # loss_1 => sum_1 # remainder => remainder # sub_1 => sub_1 # sub_2 => sub_2 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, 1.5707963267948966), kwargs = {}) # %remainder : [num_users=1] = call_function[target=torch.ops.aten.remainder.Scalar](args = (%sub_1, 3.141592653589793), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%remainder, 1.5707963267948966), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {}) triton_per_fused_pow_remainder_sub_sum_0 = async_compile.triton('triton_per_fused_pow_remainder_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_pow_remainder_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_pow_remainder_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = 1.5707963267948966 tmp4 = tmp2 - tmp3 tmp5 = 3.141592653589793 tmp6 = tmp4 % tmp5 tmp7 = tl.full([1], 0, tl.int32) tmp8 = tmp6 != tmp7 tmp9 = libdevice.signbit(tmp6) if (tmp6).dtype is tl.float32 else tmp6 < 0 tmp10 = libdevice.signbit(tmp5) if (tmp5).dtype is tl.float32 else tmp5 < 0 tmp11 = tmp9 != tmp10 tmp12 = tmp8 & tmp11 tmp13 = tmp6 + tmp5 tmp14 = tl.where(tmp12, tmp13, tmp6) tmp15 = tmp14 - tmp3 tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [RBLOCK]) tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp19, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [dt, sub_1, remainder, sub_2, loss, loss_1], Original ATen: [aten.sub, aten.remainder, aten.pow, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_pow_remainder_sub_sum_0.run(arg0_1, arg1_1, buf0, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
import torch.nn as nn


def reduction_mean(loss):
    return loss.mean()


def reduction_none(loss):
    return loss


def reduction_sum(loss):
    return loss.sum()


class period_L2(nn.Module):

    def __init__(self, reduction='sum'):
        """
        periodic Squared Error
        """
        super().__init__()
        if reduction == 'sum':
            self.reduction = reduction_sum
        elif reduction == 'mean':
            self.reduction = reduction_mean
        elif reduction == 'none':
            self.reduction = reduction_none
        else:
            raise Exception('unknown reduction')

    def forward(self, theta_pred, theta_gt):
        dt = theta_pred - theta_gt
        loss = (torch.remainder(dt - np.pi / 2, np.pi) - np.pi / 2) ** 2
        assert (loss >= 0).all()
        loss = self.reduction(loss)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
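The remainder expression in forward folds any angle difference dt into [-pi/2, pi/2) before squaring, so predictions that are off by an exact multiple of pi incur zero loss, which is the right behavior for undirected angles. A small numeric sketch of the wrapping (the helper name is ours, not the repo's):

import numpy as np

def wrap(dt):
    # ((dt - pi/2) mod pi) - pi/2 folds dt into [-pi/2, pi/2)
    return np.remainder(dt - np.pi / 2, np.pi) - np.pi / 2

print(wrap(0.0))        # 0.0   -> an exact match costs nothing
print(wrap(np.pi))      # ~0.0  -> off by pi also costs nothing
print(wrap(np.pi / 2))  # -pi/2 -> a 90-degree error is maximal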
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_pow_remainder_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = 1.5707963267948966 tmp4 = tmp2 - tmp3 tmp5 = 3.141592653589793 tmp6 = tmp4 % tmp5 tmp7 = tl.full([1], 0, tl.int32) tmp8 = tmp6 != tmp7 tmp9 = libdevice.signbit(tmp6) if tmp6.dtype is tl.float32 else tmp6 < 0 tmp10 = libdevice.signbit(tmp5) if tmp5.dtype is tl.float32 else tmp5 < 0 tmp11 = tmp9 != tmp10 tmp12 = tmp8 & tmp11 tmp13 = tmp6 + tmp5 tmp14 = tl.where(tmp12, tmp13, tmp6) tmp15 = tmp14 - tmp3 tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [RBLOCK]) tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_pow_remainder_sub_sum_0[grid(1)](arg0_1, arg1_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf0, def reduction_mean(loss): return loss.mean() def reduction_none(loss): return loss def reduction_sum(loss): return loss.sum() class period_L2New(nn.Module): def __init__(self, reduction='sum'): """ periodic Squared Error """ super().__init__() if reduction == 'sum': self.reduction = reduction_sum elif reduction == 'mean': self.reduction = reduction_mean elif reduction == 'none': self.reduction = reduction_none else: raise Exception('unknown reduction') def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
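One non-obvious stretch of the kernel above is tmp6 through tmp14. Triton's % on floats behaves like fmod (sign follows the dividend), while torch.remainder is floored (sign follows the divisor), so Inductor emits a signbit comparison and conditionally adds the divisor back. The same correction in plain Python, as a sketch (names are ours):

import math

def floored_remainder(a, b):
    r = math.fmod(a, b)  # truncated: sign follows a, like the raw % in the kernel
    if r != 0 and (r < 0) != (b < 0):
        r += b           # the tmp12/tmp13/tmp14 fix-up: signs disagree, add divisor
    return r

assert floored_remainder(-1.0, math.pi) == math.pi - 1.0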
flytocc/RAPiD
period_L2
false
15358
[ "MIT" ]
142
92e6a44b8a0107def055e93c971d78fd548562f8
https://github.com/flytocc/RAPiD/tree/92e6a44b8a0107def055e93c971d78fd548562f8
ConvReLU2
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/he/che6of2h7dpdn7tylznqowewpa4tkh52ycsvrwhevjksqz53nvqz.py # Topologically Sorted Source Nodes: [t], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # t => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [0, 0, 1, 2], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 448 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 7 x2 = (xindex // 28) x3 = xindex % 28 x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 
4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + ((-4) + x3 + (16*x2)), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ik/cik5qbhwt3mkw2phkuck42e54kmmja5u7bwdegg6xzogiza2muuy.py # Topologically Sorted Source Nodes: [t_1, t_2], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # t_1 => convolution # t_2 => gt # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3e/c3evie2aplgws6rylspn2st64zpdgobm3ds772qjly2u47bm2afh.py # Topologically Sorted Source Nodes: [t_1, t_2, t_3], Original ATen: [aten.convolution, aten.leaky_relu, aten.constant_pad_nd] # Source node to ATen node mapping: # t_1 => convolution # t_2 => mul, where # t_3 => constant_pad_nd_1 # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.1), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, 
%convolution, %mul), kwargs = {}) # %constant_pad_nd_1 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%where, [1, 2, 0, 0], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_constant_pad_nd_convolution_leaky_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_convolution_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_convolution_leaky_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 448 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 7 x4 = (xindex // 7) x2 = (xindex // 28) % 4 x5 = xindex tmp0 = (-1) + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + ((-1) + x0 + (4*x4)), tmp5 & xmask, other=0.0).to(tl.int1) tmp7 = tl.load(in_ptr1 + ((-1) + x0 + (4*x4)), tmp5 & xmask, other=0.0) tmp8 = tl.load(in_ptr2 + (x2), tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 + tmp8 tmp10 = 0.1 tmp11 = tmp9 * tmp10 tmp12 = tl.where(tmp6, tmp9, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp5, tmp12, tmp13) tl.store(out_ptr0 + (x5), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3r/c3rhsh6pvtx3qvjzn43mjmgunrzfui5s3animgnt4pazacidrq7y.py # Topologically Sorted Source Nodes: [t_4, leaky_relu_1], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # leaky_relu_1 => gt_1, mul_1, where_1 # t_4 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_1, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.1), kwargs = {}) # %where_1 : [num_users=1] = 
call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {}) triton_poi_fused_convolution_leaky_relu_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x3), tmp4, xmask) tl.store(out_ptr1 + (x3), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 1, 4), (16, 4, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7, 4), (112, 28, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [t], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 448, grid=grid(448), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [t_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [t_1, t_2], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_1.run(buf1, primals_3, buf2, 256, grid=grid(256), stream=stream0) 
buf3 = empty_strided_cuda((4, 4, 4, 7), (112, 28, 7, 1), torch.float32) # Topologically Sorted Source Nodes: [t_1, t_2, t_3], Original ATen: [aten.convolution, aten.leaky_relu, aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_convolution_leaky_relu_2.run(buf2, buf1, primals_3, buf3, 448, grid=grid(448), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [t_4], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf6 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [t_4, leaky_relu_1], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_3.run(buf4, primals_5, buf5, buf6, 256, grid=grid(256), stream=stream0) del buf4 del primals_5 return (buf6, primals_2, primals_4, buf0, buf2, buf3, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 1), (16, 4, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1, 4), (16, 4, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn.functional as F
from torch.nn import Conv2d
from torch.nn import LeakyReLU


class PadSameConv2d(torch.nn.Module):

    def __init__(self, kernel_size, stride=1):
        """
        Imitates padding_mode="same" from tensorflow.
        :param kernel_size: Kernel size of the convolution, int or tuple/list
        :param stride: Stride of the convolution, int or tuple/list
        """
        super().__init__()
        if isinstance(kernel_size, (tuple, list)):
            self.kernel_size_y = kernel_size[0]
            self.kernel_size_x = kernel_size[1]
        else:
            self.kernel_size_y = kernel_size
            self.kernel_size_x = kernel_size
        if isinstance(stride, (tuple, list)):
            self.stride_y = stride[0]
            self.stride_x = stride[1]
        else:
            self.stride_y = stride
            self.stride_x = stride

    def forward(self, x: 'torch.Tensor'):
        _, _, height, width = x.shape
        padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1)
                     + self.kernel_size_y - height) / 2
        padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1)
                     + self.kernel_size_x - width) / 2
        padding = [math.floor(padding_x), math.ceil(padding_x),
                   math.floor(padding_y), math.ceil(padding_y)]
        return F.pad(input=x, pad=padding)


class ConvReLU2(torch.nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 leaky_relu_neg_slope=0.1):
        """
        Performs two convolutions and a leaky relu. The first operation only
        convolves in y direction, the second one only in x direction.
        :param in_channels: Number of input channels
        :param out_channels: Number of output channels
        :param kernel_size: Kernel size for the convolutions, first in y
            direction, then in x direction
        :param stride: Stride for the convolutions, first in y direction,
            then in x direction
        """
        super().__init__()
        self.pad_0 = PadSameConv2d(kernel_size=(kernel_size, 1),
                                   stride=(stride, 1))
        self.conv_y = Conv2d(in_channels=in_channels,
                             out_channels=out_channels,
                             kernel_size=(kernel_size, 1), stride=(stride, 1))
        self.leaky_relu = LeakyReLU(negative_slope=leaky_relu_neg_slope)
        self.pad_1 = PadSameConv2d(kernel_size=(1, kernel_size),
                                   stride=(1, stride))
        self.conv_x = Conv2d(in_channels=out_channels,
                             out_channels=out_channels,
                             kernel_size=(1, kernel_size), stride=(1, stride))

    def forward(self, x: 'torch.Tensor'):
        t = self.pad_0(x)
        t = self.conv_y(t)
        t = self.leaky_relu(t)
        t = self.pad_1(t)
        t = self.conv_x(t)
        return self.leaky_relu(t)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
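For this record's configuration (kernel_size=4, stride=1, 4x4 input), the "same" padding computed by PadSameConv2d comes out asymmetric, which is exactly the [0, 0, 1, 2] argument to constant_pad_nd in the generated code above. A quick standalone check of that arithmetic:

import math

height, stride_y, kernel_y = 4, 1, 4  # pad_0 pads only the y axis
padding_y = (stride_y * (math.ceil(height / stride_y) - 1)
             + kernel_y - height) / 2
print(padding_y)                                    # 1.5
print(math.floor(padding_y), math.ceil(padding_y))  # 1 2 -> F.pad(..., [0, 0, 1, 2])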
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn.functional as F from torch.nn import Conv2d from torch.nn import LeakyReLU assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 448 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 7 x2 = xindex // 28 x3 = xindex % 28 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-4 + x3 + 16 * x2), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_convolution_leaky_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 448 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 7 x4 = xindex // 7 x2 = xindex // 28 % 4 x5 = xindex tmp0 = -1 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-1 + x0 + 4 * x4), tmp5 & xmask, other=0.0).to(tl .int1) tmp7 = tl.load(in_ptr1 + (-1 + x0 + 4 * x4), tmp5 & xmask, other=0.0) tmp8 = tl.load(in_ptr2 + x2, tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 + tmp8 tmp10 = 0.1 tmp11 = tmp9 * tmp10 tmp12 = tl.where(tmp6, tmp9, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp5, tmp12, tmp13) tl.store(out_ptr0 + x5, tmp14, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 4), (16, 4, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7, 4), (112, 28, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(448)](primals_1, buf0, 448, XBLOCK=256, num_warps=4, num_stages=1) del 
primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_1[grid(256)](buf1, primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 7), (112, 28, 7, 1), torch.float32) triton_poi_fused_constant_pad_nd_convolution_leaky_relu_2[grid(448)]( buf2, buf1, primals_3, buf3, 448, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf6 = buf1 del buf1 triton_poi_fused_convolution_leaky_relu_3[grid(256)](buf4, primals_5, buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf4 del primals_5 return buf6, primals_2, primals_4, buf0, buf2, buf3, buf5 class PadSameConv2d(torch.nn.Module): def __init__(self, kernel_size, stride=1): """ Imitates padding_mode="same" from tensorflow. :param kernel_size: Kernelsize of the convolution, int or tuple/list :param stride: Stride of the convolution, int or tuple/list """ super().__init__() if isinstance(kernel_size, (tuple, list)): self.kernel_size_y = kernel_size[0] self.kernel_size_x = kernel_size[1] else: self.kernel_size_y = kernel_size self.kernel_size_x = kernel_size if isinstance(stride, (tuple, list)): self.stride_y = stride[0] self.stride_x = stride[1] else: self.stride_y = stride self.stride_x = stride def forward(self, x: 'torch.Tensor'): _, _, height, width = x.shape padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1 ) + self.kernel_size_y - height) / 2 padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1) + self.kernel_size_x - width) / 2 padding = [math.floor(padding_x), math.ceil(padding_x), math.floor( padding_y), math.ceil(padding_y)] return F.pad(input=x, pad=padding) class ConvReLU2New(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, leaky_relu_neg_slope=0.1): """ Performs two convolutions and a leaky relu. The first operation only convolves in y direction, the second one only in x direction. :param in_channels: Number of input channels :param out_channels: Number of output channels :param kernel_size: Kernel size for the convolutions, first in y direction, then in x direction :param stride: Stride for the convolutions, first in y direction, then in x direction """ super().__init__() self.pad_0 = PadSameConv2d(kernel_size=(kernel_size, 1), stride=( stride, 1)) self.conv_y = Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=(kernel_size, 1), stride=(stride, 1)) self.leaky_relu = LeakyReLU(negative_slope=leaky_relu_neg_slope) self.pad_1 = PadSameConv2d(kernel_size=(1, kernel_size), stride=(1, stride)) self.conv_x = Conv2d(in_channels=out_channels, out_channels= out_channels, kernel_size=(1, kernel_size), stride=(1, stride)) def forward(self, input_0): primals_2 = self.conv_y.weight primals_3 = self.conv_y.bias primals_4 = self.conv_x.weight primals_5 = self.conv_x.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
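Because ConvReLU2New reuses the eager module's parameter names (conv_y, conv_x), the two state dicts are interchangeable, which makes an end-to-end equivalence check straightforward. A hedged sketch, assuming a CUDA device and both classes from this record in scope:

import torch

torch.manual_seed(0)
m = ConvReLU2(in_channels=4, out_channels=4, kernel_size=4).cuda()
m_new = ConvReLU2New(in_channels=4, out_channels=4, kernel_size=4).cuda()
m_new.load_state_dict(m.state_dict())  # share weights so outputs are comparable
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(m(x), m_new(x), atol=1e-5)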
fish258/MonoRec
ConvReLU2
false
15359
[ "MIT" ]
388
c0612d2710802004cdd83205e63d0582de543c41
https://github.com/fish258/MonoRec/tree/c0612d2710802004cdd83205e63d0582de543c41
ChannelSELayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py # Topologically Sorted Source Nodes: [squeeze_tensor], Original ATen: [aten.mean] # Source node to ATen node mapping: # squeeze_tensor => mean # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [2]), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = 
tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/eb/cebpbupczy3a7z6yffgxybumq5trdt3jp5hxwuoo6w6cunzz7d7h.py # Topologically Sorted Source Nodes: [fc_out_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # fc_out_1 => relu # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/nv/cnvpklwd233bi4zltxm6adn2vglwhs7tyu5gqqi7widiugltrkb2.py # Topologically Sorted Source Nodes: [output_tensor], Original ATen: [aten.mul] # Source node to ATen node mapping: # output_tensor => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %view_1), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, 
DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = 3.0 tmp3 = tmp1 + tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = 6.0 tmp7 = triton_helpers.minimum(tmp5, tmp6) tmp8 = tmp1 * tmp7 tmp9 = 0.16666666666666666 tmp10 = tmp8 * tmp9 tmp11 = tmp0 * tmp10 tl.store(out_ptr0 + (x2), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4), (4, 1)) assert_size_stride(primals_3, (2, ), (1, )) assert_size_stride(primals_4, (4, 2), (2, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [squeeze_tensor], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf2) del primals_2 buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [fc_out_1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf3, primals_3, 8, grid=grid(8), stream=stream0) del primals_3 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4, (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf4) del primals_5 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output_tensor], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(primals_1, buf4, buf5, 256, grid=grid(256), stream=stream0) return (buf5, primals_1, buf1, buf3, buf4, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 
4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 2), (2, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.utils.data
import torch.utils
from matplotlib import cm as cm
from torch.nn.parallel import *
from torchvision.models import *
from torchvision.datasets import *


class ChannelSELayer(nn.Module):
    """
    Copied from https://github.com/ai-med/squeeze_and_excitation/blob/master/squeeze_and_excitation/squeeze_and_excitation.py

    Re-implementation of Squeeze-and-Excitation (SE) block described in:
    *Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507*

    MIT License

    Copyright (c) 2018 Abhijit Guha Roy

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE.
    """

    def __init__(self, num_channels, reduction_ratio=2, dim_out=None, stride=1):
        """
        :param num_channels: No of input channels
        :param reduction_ratio: By how much the num_channels should be reduced
        """
        super(ChannelSELayer, self).__init__()
        if dim_out is not None:
            assert dim_out == num_channels, (dim_out, num_channels,
                'only same dimensionality is supported')
        num_channels_reduced = num_channels // reduction_ratio
        self.reduction_ratio = reduction_ratio
        self.stride = stride
        self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True)
        self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Hardswish()

    def forward(self, input_tensor):
        """
        :param input_tensor: X, shape = (batch_size, num_channels, H, W)
        :return: output tensor
        """
        batch_size, num_channels, _H, _W = input_tensor.size()
        squeeze_tensor = input_tensor.reshape(batch_size, num_channels, -1
            ).mean(dim=2)
        fc_out_1 = self.relu(self.fc1(squeeze_tensor))
        fc_out_2 = self.sigmoid(self.fc2(fc_out_1))
        a, b = squeeze_tensor.size()
        output_tensor = torch.mul(input_tensor, fc_out_2.view(a, b, 1, 1))
        if self.stride > 1:
            output_tensor = output_tensor[:, :, ::self.stride, ::self.stride]
        return output_tensor


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_channels': 4}]
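Note that the attribute named sigmoid is actually nn.Hardswish, i.e. x * relu6(x + 3) / 6; the constants 3.0, 0.0, 6.0, and 0.16666666666666666 in triton_poi_fused_mul_2 are this formula inlined into the channel-wise multiply. A short identity check (a sketch, not from the repo):

import torch
import torch.nn.functional as F

x = torch.linspace(-5, 5, steps=11)
manual = x * torch.clamp(x + 3, min=0.0, max=6.0) / 6  # relu6(x + 3) / 6 gate
assert torch.allclose(manual, F.hardswish(x))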
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data import torch.utils from matplotlib import cm as cm from torch.nn.parallel import * from torchvision.models import * from torchvision.datasets import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = 3.0 tmp3 = tmp1 + tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = 6.0 tmp7 = triton_helpers.minimum(tmp5, tmp6) tmp8 = tmp1 * tmp7 tmp9 = 0.16666666666666666 tmp10 = tmp8 * tmp9 tmp11 = tmp0 * tmp10 tl.store(out_ptr0 + x2, tmp11, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4), (4, 1)) assert_size_stride(primals_3, (2,), (1,)) assert_size_stride(primals_4, (4, 2), (2, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf2 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_2, (4, 2), (1, 4 ), 0), out=buf2) del primals_2 buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(8)](buf3, primals_3, 8, XBLOCK=8, num_warps=1, num_stages=1) del primals_3 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, buf3, reinterpret_tensor(primals_4, (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf4) del primals_5 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_2[grid(256)](primals_1, buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf5, primals_1, buf1, buf3, buf4, primals_4 class ChannelSELayerNew(nn.Module): """ Copied from https://github.com/ai-med/squeeze_and_excitation/blob/master/squeeze_and_excitation/squeeze_and_excitation.py Re-implementation of Squeeze-and-Excitation (SE) block described in: *Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507* MIT License Copyright (c) 2018 Abhijit Guha Roy Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" def __init__(self, num_channels, reduction_ratio=2, dim_out=None, stride=1 ): """ :param num_channels: No of input channels :param reduction_ratio: By how much should the num_channels should be reduced """ super(ChannelSELayerNew, self).__init__() if dim_out is not None: assert dim_out == num_channels, (dim_out, num_channels, 'only same dimensionality is supported') num_channels_reduced = num_channels // reduction_ratio self.reduction_ratio = reduction_ratio self.stride = stride self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True) self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True) self.relu = nn.ReLU() self.sigmoid = nn.Hardswish() def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
evdcush/ppuda
ChannelSELayer
false
15,360
[ "MIT" ]
262
22783ac92207da6730ee618c953af230c5c39f28
https://github.com/evdcush/ppuda/tree/22783ac92207da6730ee618c953af230c5c39f28
Upconv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/7v/c7vsd3fryw4ameuaa7ye3zxgcno6raioyn3ogh4j4hjuesnfsqac.py # Topologically Sorted Source Nodes: [t, t_1], Original ATen: [aten._unsafe_index, aten.constant_pad_nd] # Source node to ATen node mapping: # t => _unsafe_index # t_1 => constant_pad_nd # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%_unsafe_index, [0, 1, 0, 1], 0.0), kwargs = {}) triton_poi_fused__unsafe_index_constant_pad_nd_0 = async_compile.triton('triton_poi_fused__unsafe_index_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : 
tl.constexpr): xnumel = 1296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 9) % 9 x0 = xindex % 9 x2 = (xindex // 81) x4 = xindex tmp0 = x1 tmp1 = tl.full([1], 8, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x0 tmp4 = tmp3 < tmp1 tmp5 = tmp2 & tmp4 tmp6 = tmp0.to(tl.float32) tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp8.to(tl.int32) tmp10 = tmp3.to(tl.float32) tmp11 = tmp10 * tmp7 tmp12 = tmp11.to(tl.int32) tmp13 = tl.load(in_ptr0 + (tmp12 + (4*tmp9) + (16*x2)), tmp5 & xmask, eviction_policy='evict_last', other=0.0) tl.store(out_ptr0 + (x4), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mt/cmt4roffhwfg6vw2odjfrgu4bjav3cztqx74kxjfq5igljucibfl.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 64) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32) # Topologically Sorted Source Nodes: [t, t_1], Original ATen: [aten._unsafe_index, 
aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_index_constant_pad_nd_0.run(primals_1, buf0, 1296, grid=grid(1296), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 1024, grid=grid(1024), stream=stream0) del primals_3 return (buf2, primals_2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 2, 2), (16, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn.functional as F
from torch.nn import Conv2d
from torch.nn import Upsample


class PadSameConv2d(torch.nn.Module):

    def __init__(self, kernel_size, stride=1):
        """
        Imitates padding_mode="same" from tensorflow.
        :param kernel_size: Kernel size of the convolution, int or tuple/list
        :param stride: Stride of the convolution, int or tuple/list
        """
        super().__init__()
        if isinstance(kernel_size, (tuple, list)):
            self.kernel_size_y = kernel_size[0]
            self.kernel_size_x = kernel_size[1]
        else:
            self.kernel_size_y = kernel_size
            self.kernel_size_x = kernel_size
        if isinstance(stride, (tuple, list)):
            self.stride_y = stride[0]
            self.stride_x = stride[1]
        else:
            self.stride_y = stride
            self.stride_x = stride

    def forward(self, x: 'torch.Tensor'):
        _, _, height, width = x.shape
        padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1
            ) + self.kernel_size_y - height) / 2
        padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1
            ) + self.kernel_size_x - width) / 2
        padding = [math.floor(padding_x), math.ceil(padding_x), math.floor(
            padding_y), math.ceil(padding_y)]
        return F.pad(input=x, pad=padding)


class Upconv(torch.nn.Module):

    def __init__(self, in_channels, out_channels):
        """
        Upsamples the input by a factor of two (nearest neighbour), then
        applies "same" padding and a single 2x2 convolution.
        :param in_channels: Number of input channels
        :param out_channels: Number of output channels
        """
        super().__init__()
        self.upsample = Upsample(scale_factor=2)
        self.pad = PadSameConv2d(kernel_size=2)
        self.conv = Conv2d(in_channels=in_channels, out_channels=
            out_channels, kernel_size=2, stride=1)

    def forward(self, x: 'torch.Tensor'):
        t = self.upsample(x)
        t = self.pad(t)
        return self.conv(t)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
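As a worked check of the padding arithmetic above (a sketch, not repo code): the Upsample doubles the 4x4 test input to 8x8, and with kernel_size=2, stride=1 each padding term evaluates to (1 * (ceil(8/1) - 1) + 2 - 8) / 2 = 0.5, so the floor/ceil pair gives the asymmetric pad [0, 1, 0, 1], exactly the constant_pad_nd([0, 1, 0, 1], 0.0) recorded in the Inductor graph fragment above.

import math
import torch
import torch.nn.functional as F

# Padding for an 8x8 map, kernel 2, stride 1: 0.5 per axis.
padding = (1 * (math.ceil(8 / 1) - 1) + 2 - 8) / 2
pad = [math.floor(padding), math.ceil(padding)] * 2
assert pad == [0, 1, 0, 1]
# F.pad grows 8x8 to 9x9, matching the (4, 4, 9, 9) buffer allocated in call().
assert F.pad(torch.zeros(4, 4, 8, 8), pad).shape == (4, 4, 9, 9)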
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn.functional as F from torch.nn import Conv2d from torch.nn import Upsample assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__unsafe_index_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 9 % 9 x0 = xindex % 9 x2 = xindex // 81 x4 = xindex tmp0 = x1 tmp1 = tl.full([1], 8, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x0 tmp4 = tmp3 < tmp1 tmp5 = tmp2 & tmp4 tmp6 = tmp0.to(tl.float32) tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp8.to(tl.int32) tmp10 = tmp3.to(tl.float32) tmp11 = tmp10 * tmp7 tmp12 = tmp11.to(tl.int32) tmp13 = tl.load(in_ptr0 + (tmp12 + 4 * tmp9 + 16 * x2), tmp5 & xmask, eviction_policy='evict_last', other=0.0) tl.store(out_ptr0 + x4, tmp13, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 2, 2), (16, 4, 2, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_constant_pad_nd_0[grid(1296)](primals_1, buf0, 1296, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(1024)](buf2, primals_3, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0 class PadSameConv2d(torch.nn.Module): def __init__(self, kernel_size, stride=1): """ Imitates padding_mode="same" from tensorflow. 
:param kernel_size: Kernel size of the convolution, int or tuple/list
        :param stride: Stride of the convolution, int or tuple/list
        """
        super().__init__()
        if isinstance(kernel_size, (tuple, list)):
            self.kernel_size_y = kernel_size[0]
            self.kernel_size_x = kernel_size[1]
        else:
            self.kernel_size_y = kernel_size
            self.kernel_size_x = kernel_size
        if isinstance(stride, (tuple, list)):
            self.stride_y = stride[0]
            self.stride_x = stride[1]
        else:
            self.stride_y = stride
            self.stride_x = stride

    def forward(self, x: 'torch.Tensor'):
        _, _, height, width = x.shape
        padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1
            ) + self.kernel_size_y - height) / 2
        padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1
            ) + self.kernel_size_x - width) / 2
        padding = [math.floor(padding_x), math.ceil(padding_x), math.floor(
            padding_y), math.ceil(padding_y)]
        return F.pad(input=x, pad=padding)


class UpconvNew(torch.nn.Module):

    def __init__(self, in_channels, out_channels):
        """
        Upsamples the input by a factor of two (nearest neighbour), then
        applies "same" padding and a single 2x2 convolution.
        :param in_channels: Number of input channels
        :param out_channels: Number of output channels
        """
        super().__init__()
        self.upsample = Upsample(scale_factor=2)
        self.pad = PadSameConv2d(kernel_size=2)
        self.conv = Conv2d(in_channels=in_channels, out_channels=
            out_channels, kernel_size=2, stride=1)

    def forward(self, input_0):
        primals_2 = self.conv.weight
        primals_3 = self.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
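Assuming both module definitions are importable in one session, a hedged equivalence check between the eager Upconv and the compiled UpconvNew could look like the sketch below. It requires CUDA, since call() targets cuda:0, and is illustrative rather than part of the repo.

import torch

if torch.cuda.is_available():
    eager = Upconv(in_channels=4, out_channels=4).cuda()
    compiled = UpconvNew(in_channels=4, out_channels=4).cuda()
    # Copy the conv weights so both modules compute the same function.
    compiled.conv.load_state_dict(eager.conv.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(eager(x), compiled(x))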
fish258/MonoRec
Upconv
false
15,361
[ "MIT" ]
388
c0612d2710802004cdd83205e63d0582de543c41
https://github.com/fish258/MonoRec/tree/c0612d2710802004cdd83205e63d0582de543c41
OrthogonalFusion
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/5m/c5mgkscpxmkpe5nca3fdpsatq7jztthvcxfvjtoqp2arx2m5zmcz.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%expand, %sub], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 8 x2 = (xindex // 128) x0 = xindex % 16 x4 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + 
((4*x2) + x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1))), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.load(in_ptr2 + (x0 + (16*((-4) + x1))), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr0 + (4*x2), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp11 * tmp11 tmp13 = tl.load(in_ptr0 + (1 + (4*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tl.load(in_ptr0 + (2 + (4*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tl.load(in_ptr0 + (3 + (4*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = libdevice.sqrt(tmp21) tmp23 = tmp22 * tmp22 tmp24 = tmp10 / tmp23 tmp25 = tmp9 - tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp6, tmp25, tmp26) tmp28 = tl.where(tmp4, tmp5, tmp27) tl.store(out_ptr0 + (x4), tmp28, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [projection], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 1, 4), (4, 4, 1), 0), arg1_1, out=buf0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 1), (4, 1, 1), 0), buf0, out=buf1) del buf0 buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(arg0_1, arg1_1, buf1, buf2, 512, grid=grid(512), stream=stream0) del arg0_1 del arg1_1 del buf1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class OrthogonalFusion(nn.Module): def __init__(self): super().__init__() def forward(self, local_feat, global_feat): global_feat_norm = torch.norm(global_feat, p=2, dim=1) projection = torch.bmm(global_feat.unsqueeze(1), torch.flatten( local_feat, start_dim=2)) projection = torch.bmm(global_feat.unsqueeze(2), projection).view( local_feat.size()) projection = projection / (global_feat_norm * global_feat_norm).view( -1, 1, 1, 1) orthogonal_comp = local_feat - projection global_feat = global_feat.unsqueeze(-1).unsqueeze(-1) return torch.cat([global_feat.expand(orthogonal_comp.size()), orthogonal_comp], dim=1) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
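With a 4D local_feat of shape (B, C, H, W), the layout the forward is written for, the output concatenates the broadcast global descriptor with the component of local_feat orthogonal to it. Below is a small property check (a sketch; note that the 3D (B, C, V) input returned by get_inputs() above instead exercises a broadcasting edge case, which is what the compiled kernel reproduces).

import torch

fusion = OrthogonalFusion()
local_feat = torch.rand(2, 8, 5, 5)    # (B, C, H, W)
global_feat = torch.rand(2, 8)         # (B, C)
out = fusion(local_feat, global_feat)  # (B, 2C, H, W)
orth = out[:, 8:]                      # residual after removing the projection
# The residual is orthogonal to global_feat at every spatial location.
dots = torch.einsum('bc,bchw->bhw', global_feat, orth)
assert dots.abs().max().item() < 1e-4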
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x2 = xindex // 128 x0 = xindex % 16 x4 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x2 + x1), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.load(in_ptr2 + (x0 + 16 * (-4 + x1)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr0 + 4 * x2, tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp12 = tmp11 * tmp11 tmp13 = tl.load(in_ptr0 + (1 + 4 * x2), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tl.load(in_ptr0 + (2 + 4 * x2), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tl.load(in_ptr0 + (3 + 4 * x2), tmp6 & xmask, eviction_policy= 'evict_last', other=0.0) tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = libdevice.sqrt(tmp21) tmp23 = tmp22 * tmp22 tmp24 = tmp10 / tmp23 tmp25 = tmp9 - tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp6, tmp25, tmp26) tmp28 = tl.where(tmp4, tmp5, tmp27) tl.store(out_ptr0 + x4, tmp28, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 1, 4), (4, 4, 1), 0), arg1_1, out=buf0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 1), (4, 1, 1), 0), buf0, out=buf1) del buf0 buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](arg0_1, arg1_1, buf1, buf2, 512, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 del buf1 return buf2, class OrthogonalFusionNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg1_1 = input_0 arg0_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
flrngel/DOLG-pytorch
OrthogonalFusion
false
15,362
[ "MIT" ]
56
97732d2932ef6733f17cf8ac1aee990effe6fd64
https://github.com/flrngel/DOLG-pytorch/tree/97732d2932ef6733f17cf8ac1aee990effe6fd64
compute_g_spa
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/xs/cxs6cnizwstrvtjkstrg4wcxe72klfug4afxftlhnht65y7cgp7b.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 768 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 192 y1 = (yindex // 192) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (192*x2) + (786432*y1)), tmp0, ymask) ''', 
device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kt/cktoi3erhttvhvqek67f5tc5ed7lu2ehmdgcebulwbmgbwjno7bm.py # Topologically Sorted Source Nodes: [g1], Original ATen: [aten.clone] # Source node to ATen node mapping: # g1 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3145728 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 192 x1 = (xindex // 192) % 64 x2 = (xindex // 12288) % 64 x3 = (xindex // 786432) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (192*x2) + (12288*x1) + (786432*x3)), None) tl.store(out_ptr0 + (x4), tmp0, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mv/cmv4zr7nvy5c53g23wbtqamn7qtqx2tv5khvahyrm3utu2eq3vh7.py # Topologically Sorted Source Nodes: [g2], Original ATen: [aten.clone] # Source node to ATen node mapping: # g2 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536, 64], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 49152 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 12288 y1 = (yindex // 12288) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (12288*x2) + (786432*y1)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (64*y3)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/37/c37gipmpvxufxeonpbhqiaa5t4frvvu5hyksfogf6bg6yfkqy34l.py # Topologically Sorted Source Nodes: [g], Original ATen: [aten._softmax] # Source node to ATen node mapping: # g => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_2, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_3 = async_compile.triton('triton_per_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16384, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16384 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.max2(tmp1, 1)[:, None] tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp9 = tmp5 / tmp8 tl.store(out_ptr2 + (r1 + (64*x0)), tmp9, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (192, 192, 1, 1), (192, 1, 1, 1)) assert_size_stride(primals_2, (4, 192, 64, 64), (786432, 4096, 64, 1)) assert_size_stride(primals_3, (192, 192, 1, 1), (192, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 192, 64, 64), (786432, 1, 12288, 192), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_2, buf0, 768, 4096, grid=grid(768, 4096), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 192, 64, 64), (786432, 1, 12288, 192)) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 192, 64, 64), (786432, 1, 12288, 192)) buf3 = empty_strided_cuda((4, 64, 64, 192), (786432, 12288, 192, 1), torch.float32) # Topologically Sorted Source Nodes: [g1], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf1, buf3, 3145728, grid=grid(3145728), stream=stream0) buf4 = reinterpret_tensor(buf1, (4, 64, 192, 64), (786432, 12288, 64, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [g2], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf2, buf4, 49152, 64, grid=grid(49152, 64), stream=stream0) del buf2 buf5 = empty_strided_cuda((256, 64, 64), (4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [g3], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (256, 64, 192), (12288, 192, 1), 0), reinterpret_tensor(buf4, (256, 192, 64), (12288, 64, 1), 0), out=buf5) buf8 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [g], Original ATen: [aten._softmax] triton_per_fused__softmax_3.run(buf5, buf8, 16384, 64, grid=grid(16384), stream=stream0) del buf5 return (buf8, primals_1, buf0, primals_3, buf8, reinterpret_tensor(buf3, (256, 192, 64), (12288, 1, 192), 0), reinterpret_tensor(buf4, (256, 64, 192), (12288, 1, 64), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((192, 192, 1, 1), (192, 1, 1, 
1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 192, 64, 64), (786432, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((192, 192, 1, 1), (192, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class cnn1x1(nn.Module): def __init__(self, dim1=3, dim2=3, bias=True): super(cnn1x1, self).__init__() self.cnn = nn.Conv2d(dim1, dim2, kernel_size=1, bias=bias) def forward(self, x): x = self.cnn(x) return x class compute_g_spa(nn.Module): def __init__(self, dim1=64 * 3, dim2=64 * 3, bias=False): super(compute_g_spa, self).__init__() self.dim1 = dim1 self.dim2 = dim2 self.g1 = cnn1x1(self.dim1, self.dim2, bias=bias) self.g2 = cnn1x1(self.dim1, self.dim2, bias=bias) self.softmax = nn.Softmax(dim=-1) def forward(self, x1): g1 = self.g1(x1).permute(0, 3, 2, 1).contiguous() g2 = self.g2(x1).permute(0, 3, 1, 2).contiguous() g3 = g1.matmul(g2) g = self.softmax(g3) return g def get_inputs(): return [torch.rand([4, 192, 64, 64])] def get_init_inputs(): return [[], {}]
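compute_g_spa builds a self-attention-style affinity map: two 1x1 convolutions play query/key roles, a batched matmul scores positions against each other, and the softmax normalizes each row. A shape walkthrough under the get_inputs() sizes follows (a sketch; the (N, C, V, T) joints-by-frames naming is an assumption about the skeleton-data layout, not stated in this file).

import torch

m = compute_g_spa(dim1=192, dim2=192, bias=False)
x = torch.rand(4, 192, 64, 64)                 # assumed (N, C, V, T)
g1 = m.g1(x).permute(0, 3, 2, 1).contiguous()  # (N, T, V, C)
g2 = m.g2(x).permute(0, 3, 1, 2).contiguous()  # (N, T, C, V)
g3 = g1.matmul(g2)                             # (N, T, V, V): pairwise scores
g = m.softmax(g3)                              # each row sums to 1
assert g.shape == (4, 64, 64, 64)
assert torch.allclose(g.sum(-1), torch.ones(4, 64, 64), atol=1e-5)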
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 768 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 192 y1 = yindex // 192 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 192 * x2 + 786432 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 192 x1 = xindex // 192 % 64 x2 = xindex // 12288 % 64 x3 = xindex // 786432 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 192 * x2 + 12288 * x1 + 786432 * x3), None) tl.store(out_ptr0 + x4, tmp0, None) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 12288 y1 = yindex // 12288 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 12288 * x2 + 786432 * y1), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 64 * y3), tmp0, xmask) @triton.jit def triton_per_fused__softmax_3(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.max2(tmp1, 1)[:, None] tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp9 = tmp5 / tmp8 tl.store(out_ptr2 + (r1 + 64 * x0), tmp9, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (192, 192, 1, 1), (192, 1, 1, 1)) assert_size_stride(primals_2, (4, 192, 64, 64), (786432, 4096, 64, 1)) assert_size_stride(primals_3, (192, 192, 1, 1), (192, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 192, 64, 64), (786432, 1, 12288, 192), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(768, 4096)](primals_2, buf0, 768, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_2 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf1, (4, 192, 64, 64), (786432, 1, 12288, 192)) buf2 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 192, 64, 64), (786432, 1, 12288, 192)) buf3 = empty_strided_cuda((4, 64, 64, 192), (786432, 12288, 192, 1), torch.float32) triton_poi_fused_clone_1[grid(3145728)](buf1, buf3, 3145728, XBLOCK =512, num_warps=8, num_stages=1) buf4 = reinterpret_tensor(buf1, (4, 64, 192, 64), (786432, 12288, 64, 1), 0) del buf1 triton_poi_fused_clone_2[grid(49152, 64)](buf2, buf4, 49152, 64, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf2 buf5 = empty_strided_cuda((256, 64, 64), (4096, 64, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (256, 64, 192), (12288, 192, 1), 0), reinterpret_tensor(buf4, (256, 192, 64), (12288, 64, 1), 0), out=buf5) buf8 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32) triton_per_fused__softmax_3[grid(16384)](buf5, buf8, 16384, 64, XBLOCK=8, num_warps=4, num_stages=1) del buf5 return buf8, primals_1, buf0, primals_3, buf8, reinterpret_tensor(buf3, (256, 192, 64), (12288, 1, 192), 0), reinterpret_tensor(buf4, (256, 64, 192), (12288, 1, 64), 0) class cnn1x1(nn.Module): def __init__(self, dim1=3, dim2=3, bias=True): super(cnn1x1, self).__init__() self.cnn = nn.Conv2d(dim1, dim2, kernel_size=1, bias=bias) def forward(self, x): x = self.cnn(x) return x class compute_g_spaNew(nn.Module): def __init__(self, dim1=64 * 3, dim2=64 * 3, bias=False): super(compute_g_spaNew, self).__init__() self.dim1 = dim1 self.dim2 = dim2 self.g1 = cnn1x1(self.dim1, self.dim2, bias=bias) self.g2 = cnn1x1(self.dim1, self.dim2, bias=bias) self.softmax = nn.Softmax(dim=-1) def forward(self, input_0): primals_1 = self.g1.cnn.weight primals_3 = self.g2.cnn.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
fabro66/Online-Skeleton-based-Action-Recognition
compute_g_spa
false
15,363
[ "MIT" ]
63
de00cbf17ceea98a7d07f68bbbd966bfd02d3b40
https://github.com/fabro66/Online-Skeleton-based-Action-Recognition/tree/de00cbf17ceea98a7d07f68bbbd966bfd02d3b40
CompositeActivation
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/6q/c6qxyfxjspag53ixwmhqtyfapvl7w7tupzhersjhlw7pr5ofsydj.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%div, %div_1], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 8 x0 = xindex % 16 x2 = (xindex // 128) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, 
other=0.0) tmp6 = libdevice.atan(tmp5) tmp7 = 1.4925373134328357 tmp8 = tmp6 * tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tmp12 = tl.full([1], 8, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr0 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp11 & xmask, other=0.0) tmp15 = libdevice.atan(tmp14) tmp16 = tmp15 * tmp15 tmp17 = 1.6666666666666667 tmp18 = tmp16 * tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp11, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp10, tmp20) tl.store(out_ptr0 + (x3), tmp21, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(arg0_1, buf0, 512, grid=grid(512), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class CompositeActivation(torch.nn.Module): def forward(self, x): x = torch.atan(x) return torch.cat([x / 0.67, x * x / 0.6], 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
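The fused kernel above replaces the two divisions with multiplications by the reciprocals: tmp7 = 1.4925373134328357 is 1/0.67 and tmp17 = 1.6666666666666667 is 1/0.6. A quick eager check of the channel layout (a sketch):

import torch

act = CompositeActivation()
x = torch.rand(4, 4, 4, 4)
out = act(x)                  # (4, 8, 4, 4): two 4-channel halves
a = torch.atan(x)
torch.testing.assert_close(out[:, :4], a / 0.67)     # kernel: atan(x) * (1 / 0.67)
torch.testing.assert_close(out[:, 4:], a * a / 0.6)  # kernel: atan(x)^2 * (1 / 0.6)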
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = libdevice.atan(tmp5) tmp7 = 1.4925373134328357 tmp8 = tmp6 * tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp14 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp11 & xmask, other=0.0) tmp15 = libdevice.atan(tmp14) tmp16 = tmp15 * tmp15 tmp17 = 1.6666666666666667 tmp18 = tmp16 * tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp11, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp10, tmp20) tl.store(out_ptr0 + x3, tmp21, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](arg0_1, buf0, 512, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class CompositeActivationNew(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
fuzhanrahmanian/lucent
CompositeActivation
false
15,364
[ "Apache-2.0" ]
449
13b24c3c37784185275da73c7a11095b2ae809c5
https://github.com/fuzhanrahmanian/lucent/tree/13b24c3c37784185275da73c7a11095b2ae809c5
AddAndNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/gi/cgi45knaonkz7f2ymhhwdszxotrkx3bl6ehavmfo5zp3bzzio3zk.py # Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # layer_norm => var_mean # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %primals_2), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_0 = async_compile.triton('triton_poi_fused_add_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/nh/cnhbjzi35eshrjhk7tjj644pcpxhgyi7ajxmdjzgjtdqbnbek7qt.py # Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # layer_norm => add_1, add_2, mul, mul_1, rsqrt, sub # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_3), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_4), kwargs = {}) triton_poi_fused_add_native_layer_norm_1 = async_compile.triton('triton_poi_fused_add_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp9, xmask) tl.store(out_ptr1 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_add_native_layer_norm_0.run(primals_1, primals_2, buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_1.run(primals_1, primals_2, buf0, buf1, primals_3, primals_4, buf2, buf3, 256, grid=grid(256), stream=stream0) del buf0 del buf1 del primals_1 del primals_2 del primals_3 del primals_4 return (buf3, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
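For reference, the two generated kernels split the fused add + LayerNorm over the size-4 last dimension: the first reduces x + residual to a per-row mean and biased variance, the second normalizes and applies the affine parameters. A minimal plain-PyTorch sketch of the same computation, assuming the traced (4, 4, 4, 4) shapes above (all tensor values here are illustrative):

import torch
import torch.nn.functional as F

# Illustrative inputs matching the traced shapes and d_model=4.
x = torch.rand(4, 4, 4, 4)
residual = torch.rand(4, 4, 4, 4)
weight, bias = torch.rand(4), torch.rand(4)

s = x + residual
mu = s.mean(dim=-1, keepdim=True)                   # kernel 0 -> buf0
var = s.var(dim=-1, unbiased=False, keepdim=True)   # kernel 0 -> buf1 (biased)
out = (s - mu) * torch.rsqrt(var + 1e-05) * weight + bias  # kernel 1 -> buf3
ref = F.layer_norm(s, (4,), weight, bias, eps=1e-05)
assert torch.allclose(out, ref, atol=1e-5)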
import torch import torch.nn as nn class AddAndNorm(nn.Module): def __init__(self, d_model): super(AddAndNorm, self).__init__() self.layer_norm = nn.LayerNorm(d_model) def forward(self, x, residual): return self.layer_norm(x + residual) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_native_layer_norm_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp9, xmask) tl.store(out_ptr1 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_add_native_layer_norm_0[grid(64)](primals_1, primals_2, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_1[grid(256)](primals_1, primals_2, buf0, buf1, primals_3, primals_4, buf2, buf3, 256, 
XBLOCK=128, num_warps=4, num_stages=1) del buf0 del buf1 del primals_1 del primals_2 del primals_3 del primals_4 return buf3, buf2 class AddAndNormNew(nn.Module): def __init__(self, d_model): super(AddAndNormNew, self).__init__() self.layer_norm = nn.LayerNorm(d_model) def forward(self, input_0, input_1): primals_3 = self.layer_norm.weight primals_4 = self.layer_norm.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
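A minimal smoke-test sketch for this entry, assuming a CUDA device is available and that both AddAndNorm and AddAndNormNew above are in scope (this check is not part of the generated output):

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    x = torch.rand(4, 4, 4, 4, device='cuda')
    residual = torch.rand(4, 4, 4, 4, device='cuda')
    eager = AddAndNorm(d_model=4).cuda()
    compiled = AddAndNormNew(d_model=4).cuda()
    # Share the affine parameters so both paths use identical weights.
    compiled.layer_norm.load_state_dict(eager.layer_norm.state_dict())
    out_eager = eager(x, residual)
    out_triton = compiled(x, residual)
    assert torch.allclose(out_eager, out_triton, atol=1e-5)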
francismontalbo/attention-is-all-you-need-paper
AddAndNorm
false
15,365
[ "MIT" ]
167
21ba3e48917da0c6808126d183bece6a9969cfd2
https://github.com/francismontalbo/attention-is-all-you-need-paper/tree/21ba3e48917da0c6808126d183bece6a9969cfd2
ConvSig
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/xs/cxs2a7zwcw5yxvn445xldhvii7772mtsthpxnfawxoahvyf3vtaj.py # Topologically Sorted Source Nodes: [t], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # t => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 2, 1, 2], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 7) % 7 x0 = xindex % 7 x2 = (xindex // 49) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 
4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/nl/cnlc6yjkbua5jkop4rrww37vaigeaa5rgiz5dbh7hjclxg6xrxjb.py # Topologically Sorted Source Nodes: [t_1, sigmoid], Original ATen: [aten.convolution, aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # t_1 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x3), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) # Topologically Sorted Source Nodes: [t], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 784, grid=grid(784), stream=stream0) del primals_1 # Topologically Sorted 
Source Nodes: [t_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [t_1, sigmoid], Original ATen: [aten.convolution, aten.sigmoid] triton_poi_fused_convolution_sigmoid_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 return (buf2, primals_2, buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn.functional as F from torch.nn import Conv2d from torch.nn import Sigmoid class PadSameConv2d(torch.nn.Module): def __init__(self, kernel_size, stride=1): """ Imitates padding_mode="same" from tensorflow. :param kernel_size: Kernel size of the convolution, int or tuple/list :param stride: Stride of the convolution, int or tuple/list """ super().__init__() if isinstance(kernel_size, (tuple, list)): self.kernel_size_y = kernel_size[0] self.kernel_size_x = kernel_size[1] else: self.kernel_size_y = kernel_size self.kernel_size_x = kernel_size if isinstance(stride, (tuple, list)): self.stride_y = stride[0] self.stride_x = stride[1] else: self.stride_y = stride self.stride_x = stride def forward(self, x: 'torch.Tensor'): _, _, height, width = x.shape padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1 ) + self.kernel_size_y - height) / 2 padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1) + self.kernel_size_x - width) / 2 padding = [math.floor(padding_x), math.ceil(padding_x), math.floor( padding_y), math.ceil(padding_y)] return F.pad(input=x, pad=padding) class ConvSig(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1): """ Performs a same-padded convolution followed by a sigmoid activation. :param in_channels: Number of input channels :param out_channels: Number of output channels :param kernel_size: Kernel size for the convolution, first in y direction, then in x direction :param stride: Stride for the convolution, first in y direction, then in x direction """ super().__init__() self.pad = PadSameConv2d(kernel_size=kernel_size, stride=stride) self.conv = Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride) self.sig = Sigmoid() def forward(self, x: 'torch.Tensor'): t = self.pad(x) t = self.conv(t) return self.sig(t) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn.functional as F from torch.nn import Conv2d from torch.nn import Sigmoid assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 7 % 7 x0 = xindex % 7 x2 = xindex // 49 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x3, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(784)](primals_1, buf0, 784, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_sigmoid_1[grid(256)](buf2, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0, buf2 class PadSameConv2d(torch.nn.Module): def __init__(self, kernel_size, stride=1): """ Imitates padding_mode="same" from tensorflow. 
:param kernel_size: Kernel size of the convolution, int or tuple/list :param stride: Stride of the convolution, int or tuple/list """ super().__init__() if isinstance(kernel_size, (tuple, list)): self.kernel_size_y = kernel_size[0] self.kernel_size_x = kernel_size[1] else: self.kernel_size_y = kernel_size self.kernel_size_x = kernel_size if isinstance(stride, (tuple, list)): self.stride_y = stride[0] self.stride_x = stride[1] else: self.stride_y = stride self.stride_x = stride def forward(self, x: 'torch.Tensor'): _, _, height, width = x.shape padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1 ) + self.kernel_size_y - height) / 2 padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1) + self.kernel_size_x - width) / 2 padding = [math.floor(padding_x), math.ceil(padding_x), math.floor( padding_y), math.ceil(padding_y)] return F.pad(input=x, pad=padding) class ConvSigNew(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1): """ Performs a same-padded convolution followed by a sigmoid activation. :param in_channels: Number of input channels :param out_channels: Number of output channels :param kernel_size: Kernel size for the convolution, first in y direction, then in x direction :param stride: Stride for the convolution, first in y direction, then in x direction """ super().__init__() self.pad = PadSameConv2d(kernel_size=kernel_size, stride=stride) self.conv = Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride) self.sig = Sigmoid() def forward(self, input_0): primals_1 = input_0 primals_2 = self.conv.weight primals_3 = self.conv.bias output = call([primals_1, primals_2, primals_3]) return output[0]
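For reference, the [1, 2, 1, 2] padding constants baked into triton_poi_fused_constant_pad_nd_0 follow directly from the PadSameConv2d formula at the traced shapes; a small sketch reproducing the arithmetic (plain Python, values assumed from get_inputs/get_init_inputs above):

import math

# Traced configuration: 4x4 input, kernel_size=4, stride=1.
height = width = 4
kernel = 4
stride = 1
pad_y = (stride * (math.ceil(height / stride) - 1) + kernel - height) / 2  # 1.5
pad_x = (stride * (math.ceil(width / stride) - 1) + kernel - width) / 2    # 1.5
padding = [math.floor(pad_x), math.ceil(pad_x), math.floor(pad_y), math.ceil(pad_y)]
assert padding == [1, 2, 1, 2]  # the constants baked into the pad kernel
# Padded size 4 + 1 + 2 = 7, so a stride-1 conv with kernel 4 returns 7 - 4 + 1 = 4.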
fish258/MonoRec
ConvSig
false
15,366
[ "MIT" ]
388
c0612d2710802004cdd83205e63d0582de543c41
https://github.com/fish258/MonoRec/tree/c0612d2710802004cdd83205e63d0582de543c41
SqueezeEmbedding
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/bl/cbl2rxdmd7vl7atdeosqikyevrmaoinhggmygw2rjrqvkexjpxgt.py # Topologically Sorted Source Nodes: [neg, sort, sort_1, x_len], Original ATen: [aten.neg, aten.sort, aten.index] # Source node to ATen node mapping: # neg => neg # sort => sort # sort_1 => getitem_3, sort_1 # x_len => index # Graph fragment: # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {}) # %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%neg,), kwargs = {}) # %sort_1 : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%getitem_1,), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%sort_1, 1), kwargs = {}) # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [%getitem_1]), kwargs = {}) triton_per_fused_index_neg_sort_0 = async_compile.triton('triton_per_fused_index_neg_sort_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*i16', 2: '*i64', 3: '*i64', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_index_neg_sort_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': 
False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_index_neg_sort_0(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = -tmp0 tmp2 = r0 tmp3 = tmp2.to(tl.int16) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6, tmp7, = triton_helpers.sort_with_index(tmp4, tmp5, None, 1, stable=False, descending=False) tmp8 = tmp7.to(tl.int64) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp10, tmp11, = triton_helpers.sort_with_index(tmp9, tmp5, None, 1, stable=False, descending=False) tmp12 = tmp11.to(tl.int64) tmp13 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp14 = tmp8 + tmp13 tmp15 = tmp8 < 0 tmp16 = tl.where(tmp15, tmp14, tmp8) tl.device_assert((0 <= tmp16) & (tmp16 < 4), "index out of bounds: 0 <= tmp16 < 4") tmp18 = tl.load(in_ptr0 + (tmp16), None, eviction_policy='evict_last') tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp7, None) tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None) tl.store(out_ptr3 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp18, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/lq/clqymvokcnia6n7yv7kbq5esk6ua7uet4ifi3agynd3zh6rr47mg.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.index] # Source node to ATen node mapping: # x => index_1 # Graph fragment: # %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg1_1, [%getitem_1]), kwargs = {}) triton_poi_fused_index_1 = async_compile.triton('triton_poi_fused_index_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*i16', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_index_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 64) x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = tmp0.to(tl.int64) tmp2 = tl.full([XBLOCK], 4, tl.int32) tmp3 = tmp1 + tmp2 tmp4 = tmp1 < 0 tmp5 = tl.where(tmp4, tmp3, tmp1) tl.device_assert(((0 <= tmp5) & (tmp5 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp5 < 4") tmp7 = tl.load(in_ptr1 + (x0 + (64*tmp5)), xmask) tl.store(out_ptr0 + (x2), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, ), (1, )) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, ), (1, ), torch.int16) buf4 = empty_strided_cuda((4, ), (1, ), torch.int64) buf6 = empty_strided_cuda((4, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [neg, sort, sort_1, x_len], Original ATen: [aten.neg, aten.sort, aten.index] stream0 = get_raw_stream(0) triton_per_fused_index_neg_sort_0.run(arg0_1, buf1, buf4, buf6, 1, 4, grid=grid(1), stream=stream0) del arg0_1 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.index] triton_poi_fused_index_1.run(buf1, arg1_1, buf5, 256, grid=grid(256), stream=stream0) del arg1_1 del buf1 buf7 = empty_strided_cpu((4, ), (1, ), torch.int64) buf7.copy_(buf6) return (buf5, buf7, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class SqueezeEmbedding(nn.Module): """ Squeeze sequence embedding length to the longest one in the batch """ def __init__(self, batch_first=True): super(SqueezeEmbedding, self).__init__() self.batch_first = batch_first def forward(self, x, x_len): """ sequence -> sort -> pad and pack -> unpack -> unsort :param x: sequence embedding vectors :param x_len: 1-D tensor/array of sequence lengths :return: embeddings squeezed to the longest sequence length in the batch """ """sort""" x_sort_idx = torch.sort(-x_len)[1].long() x_unsort_idx = torch.sort(x_sort_idx)[1].long() x_len = x_len[x_sort_idx] x = x[x_sort_idx] """pack""" x_emb_p = torch.nn.utils.rnn.pack_padded_sequence(x, x_len.cpu(), batch_first=self.batch_first) """unpack: out""" out = torch.nn.utils.rnn.pad_packed_sequence(x_emb_p, batch_first= self.batch_first) out = out[0] """unsort""" out = out[x_unsort_idx] return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_index_neg_sort_0(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = -tmp0 tmp2 = r0 tmp3 = tmp2.to(tl.int16) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) _tmp6, tmp7 = triton_helpers.sort_with_index(tmp4, tmp5, None, 1, stable=False, descending=False) tmp8 = tmp7.to(tl.int64) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) _tmp10, tmp11 = triton_helpers.sort_with_index(tmp9, tmp5, None, 1, stable=False, descending=False) tmp12 = tmp11.to(tl.int64) tmp13 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp14 = tmp8 + tmp13 tmp15 = tmp8 < 0 tmp16 = tl.where(tmp15, tmp14, tmp8) tl.device_assert((0 <= tmp16) & (tmp16 < 4), 'index out of bounds: 0 <= tmp16 < 4') tmp18 = tl.load(in_ptr0 + tmp16, None, eviction_policy='evict_last') tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp7, None) tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None) tl.store(out_ptr3 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp18, None) @triton.jit def triton_poi_fused_index_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 64 x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tmp0.to(tl.int64) tmp2 = tl.full([XBLOCK], 4, tl.int32) tmp3 = tmp1 + tmp2 tmp4 = tmp1 < 0 tmp5 = tl.where(tmp4, tmp3, tmp1) tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~xmask, 'index out of bounds: 0 <= tmp5 < 4') tmp7 = tl.load(in_ptr1 + (x0 + 64 * tmp5), xmask) tl.store(out_ptr0 + x2, tmp7, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4,), (1,)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4,), (1,), torch.int16) buf4 = empty_strided_cuda((4,), (1,), torch.int64) buf6 = empty_strided_cuda((4,), (1,), torch.int64) get_raw_stream(0) triton_per_fused_index_neg_sort_0[grid(1)](arg0_1, buf1, buf4, buf6, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_index_1[grid(256)](buf1, arg1_1, buf5, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg1_1 del buf1 buf7 = empty_strided_cpu((4,), (1,), torch.int64) buf7.copy_(buf6) return buf5, buf7, buf4 class SqueezeEmbeddingNew(nn.Module): """ Squeeze sequence embedding length to the longest one in the batch """ def __init__(self, batch_first=True): super(SqueezeEmbeddingNew, self).__init__() self.batch_first = batch_first def forward(self, input_0, input_1): arg1_1 = input_0 arg0_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
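The double sort in triton_per_fused_index_neg_sort_0 mirrors the standard sort/unsort trick from the eager module: argsorting the sort indices yields the inverse permutation that restores the original batch order. A minimal sketch of the invariant (hypothetical lengths):

import torch

x_len = torch.tensor([2, 4, 1, 3])
x_sort_idx = torch.sort(-x_len)[1]        # descending-length order: [1, 3, 0, 2]
x_unsort_idx = torch.sort(x_sort_idx)[1]  # inverse permutation: [2, 0, 3, 1]
sorted_len = x_len[x_sort_idx]            # [4, 3, 2, 1]
assert torch.equal(sorted_len[x_unsort_idx], x_len)  # unsorting restores the input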
froth-synthesio/PyABSA
SqueezeEmbedding
false
15,367
[ "MIT" ]
199
61406e7a49f93f6c986dfd7e583d730b69c2861c
https://github.com/froth-synthesio/PyABSA/tree/61406e7a49f93f6c986dfd7e583d730b69c2861c
period_L1
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/by/cbypavs4naefh5pdp3dji4o4knrjkxq7vau3edw35xb45cpqw7da.py # Topologically Sorted Source Nodes: [dt, sub_1, remainder, sub_2, dt_1, loss], Original ATen: [aten.sub, aten.remainder, aten.abs, aten.sum] # Source node to ATen node mapping: # dt => sub # dt_1 => abs_1 # loss => sum_1 # remainder => remainder # sub_1 => sub_1 # sub_2 => sub_2 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, 1.5707963267948966), kwargs = {}) # %remainder : [num_users=1] = call_function[target=torch.ops.aten.remainder.Scalar](args = (%sub_1, 3.141592653589793), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%remainder, 1.5707963267948966), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_2,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%abs_1,), kwargs = {}) triton_per_fused_abs_remainder_sub_sum_0 = async_compile.triton('triton_per_fused_abs_remainder_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_remainder_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_remainder_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = 1.5707963267948966 tmp4 = tmp2 - tmp3 tmp5 = 3.141592653589793 tmp6 = tmp4 % tmp5 tmp7 = tl.full([1], 0, tl.int32) tmp8 = tmp6 != tmp7 tmp9 = libdevice.signbit(tmp6) if (tmp6).dtype is tl.float32 else tmp6 < 0 tmp10 = libdevice.signbit(tmp5) if (tmp5).dtype is tl.float32 else tmp5 < 0 tmp11 = tmp9 != tmp10 tmp12 = tmp8 & tmp11 tmp13 = tmp6 + tmp5 tmp14 = tl.where(tmp12, tmp13, tmp6) tmp15 = tmp14 - tmp3 tmp16 = tl_math.abs(tmp15) tmp17 = tl.broadcast_to(tmp16, [RBLOCK]) tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp19, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [dt, sub_1, remainder, sub_2, dt_1, loss], Original ATen: [aten.sub, aten.remainder, aten.abs, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_abs_remainder_sub_sum_0.run(arg0_1, arg1_1, buf0, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn class period_L1(nn.Module): def __init__(self, reduction='sum'): """ periodic L1 (absolute) error """ super().__init__() self.reduction = reduction def forward(self, theta_pred, theta_gt): dt = theta_pred - theta_gt dt = torch.abs(torch.remainder(dt - np.pi / 2, np.pi) - np.pi / 2) assert (dt >= 0).all() if self.reduction == 'sum': loss = dt.sum() elif self.reduction == 'mean': loss = dt.mean() elif self.reduction == 'none': loss = dt return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_remainder_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = 1.5707963267948966 tmp4 = tmp2 - tmp3 tmp5 = 3.141592653589793 tmp6 = tmp4 % tmp5 tmp7 = tl.full([1], 0, tl.int32) tmp8 = tmp6 != tmp7 tmp9 = libdevice.signbit(tmp6) if tmp6.dtype is tl.float32 else tmp6 < 0 tmp10 = libdevice.signbit(tmp5) if tmp5.dtype is tl.float32 else tmp5 < 0 tmp11 = tmp9 != tmp10 tmp12 = tmp8 & tmp11 tmp13 = tmp6 + tmp5 tmp14 = tl.where(tmp12, tmp13, tmp6) tmp15 = tmp14 - tmp3 tmp16 = tl_math.abs(tmp15) tmp17 = tl.broadcast_to(tmp16, [RBLOCK]) tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_abs_remainder_sub_sum_0[grid(1)](arg0_1, arg1_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf0, class period_L1New(nn.Module): def __init__(self, reduction='sum'): """ periodic L1 (absolute) error """ super().__init__() self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
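The remainder chain in triton_per_fused_abs_remainder_sub_sum_0 implements the pi-periodic distance |((dt - pi/2) mod pi) - pi/2|, which folds any angle difference into [0, pi/2] so that theta and theta + pi count as the same orientation. A quick numeric check (hypothetical values):

import math

def periodic_l1(dt):
    # Fold dt into [0, pi/2]; dt and dt + k*pi give the same distance.
    return abs((dt - math.pi / 2) % math.pi - math.pi / 2)

assert math.isclose(periodic_l1(0.0), 0.0, abs_tol=1e-12)
assert math.isclose(periodic_l1(math.pi), 0.0, abs_tol=1e-12)  # half turn: no error
assert math.isclose(periodic_l1(math.pi / 2), math.pi / 2)     # worst case
assert math.isclose(periodic_l1(0.1), periodic_l1(-0.1))       # symmetric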
flytocc/RAPiD
period_L1
false
15,368
[ "MIT" ]
142
92e6a44b8a0107def055e93c971d78fd548562f8
https://github.com/flytocc/RAPiD/tree/92e6a44b8a0107def055e93c971d78fd548562f8
ConvReLU
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/xs/cxs2a7zwcw5yxvn445xldhvii7772mtsthpxnfawxoahvyf3vtaj.py # Topologically Sorted Source Nodes: [t], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # t => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 2, 1, 2], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 7) % 7 x0 = xindex % 7 x2 = (xindex // 49) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 
4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/yz/cyzuwjibdxr5pe73bofingwgkqeahtcqwszyyfkaocyraiyyc6i3.py # Topologically Sorted Source Nodes: [t_1, leaky_relu], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # leaky_relu => gt, mul, where # t_1 => convolution # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.1), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x3), tmp4, xmask) tl.store(out_ptr1 + (x3), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) # Topologically Sorted Source Nodes: [t], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 784, grid=grid(784), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [t_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [t_1, leaky_relu], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_1.run(buf1, primals_3, buf2, buf3, 256, grid=grid(256), stream=stream0) del buf1 del primals_3 return (buf3, primals_2, buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
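Note that triton_poi_fused_convolution_leaky_relu_1 writes two buffers: the boolean mask (t > 0), which is returned for reuse in the backward pass, and the activated values themselves. A plain-PyTorch rendering of the same elementwise step, with shapes assumed from the call above:

import torch
import torch.nn.functional as F

# Assumed shapes: conv output (4, 4, 4, 4) without bias, bias (4,).
conv_out = torch.rand(4, 4, 4, 4)
bias = torch.rand(4)
t = conv_out + bias.view(1, 4, 1, 1)   # bias broadcast over N, H, W
mask = t > 0                            # buf2: kept for the backward pass
out = torch.where(mask, t, 0.1 * t)     # buf3: leaky relu with slope 0.1
assert torch.allclose(out, F.leaky_relu(t, negative_slope=0.1))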
import math import torch import torch.nn.functional as F from torch.nn import Conv2d from torch.nn import LeakyReLU class PadSameConv2d(torch.nn.Module): def __init__(self, kernel_size, stride=1): """ Imitates padding_mode="same" from tensorflow. :param kernel_size: Kernel size of the convolution, int or tuple/list :param stride: Stride of the convolution, int or tuple/list """ super().__init__() if isinstance(kernel_size, (tuple, list)): self.kernel_size_y = kernel_size[0] self.kernel_size_x = kernel_size[1] else: self.kernel_size_y = kernel_size self.kernel_size_x = kernel_size if isinstance(stride, (tuple, list)): self.stride_y = stride[0] self.stride_x = stride[1] else: self.stride_y = stride self.stride_x = stride def forward(self, x: 'torch.Tensor'): _, _, height, width = x.shape padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1 ) + self.kernel_size_y - height) / 2 padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1) + self.kernel_size_x - width) / 2 padding = [math.floor(padding_x), math.ceil(padding_x), math.floor( padding_y), math.ceil(padding_y)] return F.pad(input=x, pad=padding) class ConvReLU(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, leaky_relu_neg_slope=0.1): """ Performs a 'same'-padded convolution followed by a leaky ReLU. :param in_channels: Number of input channels :param out_channels: Number of output channels :param kernel_size: Kernel size for the convolution, first in y direction, then in x direction :param stride: Stride for the convolution, first in y direction, then in x direction """ super().__init__() self.pad = PadSameConv2d(kernel_size=kernel_size, stride=stride) self.conv = Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride) self.leaky_relu = LeakyReLU(negative_slope=leaky_relu_neg_slope) def forward(self, x: 'torch.Tensor'): t = self.pad(x) t = self.conv(t) return self.leaky_relu(t) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
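A quick worked check of the padding arithmetic above (a minimal sketch; it assumes the PadSameConv2d class just defined and arbitrary tensor values):

import torch

pad = PadSameConv2d(kernel_size=4, stride=1)
x = torch.rand(4, 4, 4, 4)
# padding_y = (1 * (ceil(4 / 1) - 1) + 4 - 4) / 2 = 1.5, so floor/ceil split it into
# 1 row before and 2 rows after; padding_x works out the same. A 4x4 map becomes
# 7x7, and a stride-1 4x4 convolution then maps it back to 4x4 ("same" behaviour).
assert pad(x).shape == (4, 4, 7, 7)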
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn.functional as F from torch.nn import Conv2d from torch.nn import LeakyReLU assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 7 % 7 x0 = xindex % 7 x2 = xindex // 49 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(784)](primals_1, buf0, 784, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_1[grid(256)](buf1, primals_3, buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_3 return buf3, primals_2, buf0, buf2 class PadSameConv2d(torch.nn.Module): def __init__(self, kernel_size, stride=1): """ Imitates padding_mode="same" from tensorflow. 
:param kernel_size: Kernel size of the convolution, int or tuple/list :param stride: Stride of the convolution, int or tuple/list """ super().__init__() if isinstance(kernel_size, (tuple, list)): self.kernel_size_y = kernel_size[0] self.kernel_size_x = kernel_size[1] else: self.kernel_size_y = kernel_size self.kernel_size_x = kernel_size if isinstance(stride, (tuple, list)): self.stride_y = stride[0] self.stride_x = stride[1] else: self.stride_y = stride self.stride_x = stride def forward(self, x: 'torch.Tensor'): _, _, height, width = x.shape padding_y = (self.stride_y * (math.ceil(height / self.stride_y) - 1 ) + self.kernel_size_y - height) / 2 padding_x = (self.stride_x * (math.ceil(width / self.stride_x) - 1) + self.kernel_size_x - width) / 2 padding = [math.floor(padding_x), math.ceil(padding_x), math.floor( padding_y), math.ceil(padding_y)] return F.pad(input=x, pad=padding) class ConvReLUNew(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, leaky_relu_neg_slope=0.1): """ Performs a 'same'-padded convolution followed by a leaky ReLU. :param in_channels: Number of input channels :param out_channels: Number of output channels :param kernel_size: Kernel size for the convolution, first in y direction, then in x direction :param stride: Stride for the convolution, first in y direction, then in x direction """ super().__init__() self.pad = PadSameConv2d(kernel_size=kernel_size, stride=stride) self.conv = Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride) self.leaky_relu = LeakyReLU(negative_slope=leaky_relu_neg_slope) def forward(self, input_0): primals_1 = self.conv.weight primals_3 = self.conv.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
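A minimal smoke-test sketch for the compiled module (an assumption-laden example, not part of the generated file: it requires a CUDA device with Triton available, since call() pins every buffer to cuda:0, and a contiguous float32 input of exactly the asserted shape):

import torch

model = ConvReLUNew(in_channels=4, out_channels=4, kernel_size=4).cuda()
out = model(torch.rand(4, 4, 4, 4, device='cuda'))
assert out.shape == (4, 4, 4, 4)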
fish258/MonoRec
ConvReLU
false
15,369
[ "MIT" ]
388
c0612d2710802004cdd83205e63d0582de543c41
https://github.com/fish258/MonoRec/tree/c0612d2710802004cdd83205e63d0582de543c41
Block
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : 
tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/vs/cvsfvbs4wlaqvwxm3svg65dnhcq336ptudvn6xetnbnrtzj7xssn.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3r/c3rfy3ljjc2bfodnr5gm65jr7ew6v6kno6w6jzahlupuqxbpvfkw.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/aw/cawvwx3nv7ipnpnf2hcgwz5usu7vsw5yynj5ofrunhktjwqff5vq.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] # Source node to ATen node 
mapping: # matmul => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/p5/cp5wuljbdcz2dl2xvl4imkn5wmtmrnbb7mnld5glztiqavldlheh.py # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_1 => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/a4/ca4u6hbohfqkgchihihlu5hrf3vuqm27r2ncsg7xb6g4ikttl2at.py # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/vv/cvvhis67uzj3m3ebbd4sgghaemqhihabasphltk5wytqdd6fe74t.py # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul_1 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (8 + y0 + (12*x2) + 
(48*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/lw/clwfsjrjxeb2gmxy5p3lplvcrvrn37iuw4atjria32bxp2jajrtc.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_1 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_9,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/5y/c5yhyv7emyc7i2ozpvns6tsiqcvdzktqqpohy4sedfe7aihkojch.py # Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => var_mean_1 # x_1 => add_2 # x_3 => add_3 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (1)) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (2)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (3)) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + (x0), tmp28, xmask) tl.store(out_ptr1 + (x0), tmp40, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xj/cxjpr2ute76xkk7edg7qlvolks2ggx2xwbrttteralhmvd2xsktw.py # Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => add_4, add_5, mul_3, mul_4, rsqrt_1, sub_2 # x_1 => add_2 # x_3 => add_3 # Graph fragment: # %add_2 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_7), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_8), kwargs = {}) triton_poi_fused_add_native_layer_norm_9 = async_compile.triton('triton_poi_fused_add_native_layer_norm_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/b4/cb43jhxvcrefkhdp7ixdoh6nmvez5h55vhlzkxtasuovu5ru7pe5.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.gelu] # Source node to ATen node mapping: # x_5 => add_6, erf, mul_5, mul_6, mul_7 # Graph fragment: # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_13, 0.5), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_13, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_6,), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %add_6), kwargs = {}) triton_poi_fused_gelu_10 = async_compile.triton('triton_poi_fused_gelu_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/pu/cpuql3oz4hmaygynopg7lq7xhfiv7hr7pr4vyzhfpmw34jymdp7q.py # Topologically Sorted Source Nodes: [x_1, x_3, x_9], Original ATen: [aten.add] # Source node to ATen node mapping: # x_1 => add_2 # x_3 => add_3 # x_9 => add_7 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %view_15), kwargs = {}) triton_poi_fused_add_11 = async_compile.triton('triton_poi_fused_add_11', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_out_ptr0 + (x2), xmask) tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tl.store(in_out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (16, 4), (4, 1)) assert_size_stride(primals_10, (16, ), (1, )) assert_size_stride(primals_11, (4, 16), (16, 1)) assert_size_stride(primals_12, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, grid=grid(64), stream=stream0) del primals_1 del primals_2 buf3 = empty_strided_cuda((16, 12), (12, 1), 
torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf3, buf4, 16, 4, grid=grid(16, 4), stream=stream0) buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf3, buf5, 16, 4, grid=grid(16, 4), stream=stream0) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf6, buf7, 256, grid=grid(256), stream=stream0) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_5.run(buf7, buf8, 256, grid=grid(256), stream=stream0) buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone] triton_poi_fused_clone_6.run(buf3, buf9, 16, 4, grid=grid(16, 4), stream=stream0) del buf3 buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] triton_poi_fused_clone_7.run(buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12) buf13 = buf1; del buf1 # reuse buf14 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_8.run(primals_3, buf12, primals_6, buf13, buf14, 16, grid=grid(16), stream=stream0) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_9.run(primals_3, buf12, primals_6, buf13, buf14, primals_7, primals_8, buf15, 64, grid=grid(64), stream=stream0) del buf13 del buf14 del primals_8 buf16 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_10 buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.gelu] 
triton_poi_fused_gelu_10.run(buf16, buf17, 256, grid=grid(256), stream=stream0) buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0), reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18) buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0); del buf18 # reuse # Topologically Sorted Source Nodes: [x_1, x_3, x_9], Original ATen: [aten.add] triton_poi_fused_add_11.run(buf19, primals_3, buf12, primals_6, primals_12, 64, grid=grid(64), stream=stream0) del primals_12 return (buf19, buf8, primals_3, primals_6, primals_7, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
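The attention softmax above is split across two Triton kernels: triton_poi_fused__softmax_4 computes exp(x - rowmax) (its multiplier is 1.0 because dim=4 with num_heads=4 gives head_dim=1, hence self.scale = head_dim ** -0.5 = 1.0), and triton_poi_fused__softmax_5 normalises by the row sum. A minimal sketch of the same numerically stable split (names are illustrative):

import torch

scores = torch.rand(4, 4, 4, 4)                            # raw q @ k^T attention scores
numer = torch.exp(scores - scores.amax(-1, keepdim=True))  # kernel _4: shift by the row max, exponentiate
probs = numer / numer.sum(-1, keepdim=True)                # kernel _5: divide by the row sum
torch.testing.assert_close(probs, scores.softmax(dim=-1))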
import torch import torch.nn as nn class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, mask=None): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) * self.scale if mask is not None: mask = mask.bool() attn = attn.masked_fill(~mask[:, None, None, :], float('-inf')) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x, attn class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn .GELU, norm_layer=nn.LayerNorm): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path ) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, x, mask=None): _x, attn = self.attn(self.norm1(x), mask=mask) x = x + self.drop_path(_x) x = x + self.drop_path(self.mlp(self.norm2(x))) return x, attn def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4, 'num_heads': 4}]
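Note that Block references DropPath without importing or defining it in this snippet; with the default drop_path=0.0 the nn.Identity branch is taken, so the code above runs as-is. For completeness, a minimal stochastic-depth sketch in the conventional (timm-style) form, offered as an assumption rather than this repo's own definition:

import torch.nn as nn

class DropPath(nn.Module):
    """Randomly zeroes whole residual branches per sample (stochastic depth)."""

    def __init__(self, drop_prob=0.0):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        if self.drop_prob == 0.0 or not self.training:
            return x
        keep_prob = 1.0 - self.drop_prob
        # One Bernoulli draw per sample, broadcast over the remaining dims.
        shape = (x.shape[0],) + (1,) * (x.ndim - 1)
        mask = x.new_empty(shape).bernoulli_(keep_prob)
        return x * mask / keep_prob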
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + 1) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr2 + 2) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr2 + 3) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp40, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = 
tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_out_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tl.store(in_out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (16, 4), (4, 1)) assert_size_stride(primals_10, (16,), (1,)) assert_size_stride(primals_11, (4, 16), (16, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(16, 4)](buf3, buf5, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused__softmax_5[grid(256)](buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_6[grid(16, 4)](buf3, buf9, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf3 buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_7[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=16, 
num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0) del buf10 extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12) buf13 = buf1 del buf1 buf14 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_3, buf12, primals_6, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_9[grid(64)](primals_3, buf12, primals_6, buf13, buf14, primals_7, primals_8, buf15, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf13 del buf14 del primals_8 buf16 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0) del buf7 extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_10 buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_gelu_10[grid(256)](buf16, buf17, 256, XBLOCK=256, num_warps=4, num_stages=1) buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0), reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18) buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0) del buf18 triton_poi_fused_add_11[grid(64)](buf19, primals_3, buf12, primals_6, primals_12, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 return buf19, buf8, primals_3, primals_6, primals_7, reinterpret_tensor( buf2, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0 ), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0 ), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4 class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, mask=None): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) * self.scale if mask is not None: mask = mask.bool() attn = attn.masked_fill(~mask[:, None, None, :], float('-inf')) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x, attn class BlockNew(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn .GELU, norm_layer=nn.LayerNorm): super().__init__() self.norm1 = 
norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path ) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, input_0): primals_1 = self.norm1.weight primals_2 = self.norm1.bias primals_4 = self.attn.qkv.weight primals_5 = self.attn.proj.weight primals_6 = self.attn.proj.bias primals_7 = self.norm2.weight primals_8 = self.norm2.bias primals_9 = self.mlp.fc1.weight primals_10 = self.mlp.fc1.bias primals_11 = self.mlp.fc2.weight primals_12 = self.mlp.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0], output[1]
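A sketch of the usual cross-check for these dumps: copy the eager Block's weights into the kernel-backed BlockNew and compare outputs. This assumes both classes are importable in one session and a CUDA device is present, since call() allocates on cuda:0.

import torch

torch.manual_seed(0)
eager = Block(dim=4, num_heads=4).cuda().eval()
fused = BlockNew(dim=4, num_heads=4).cuda().eval()
fused.load_state_dict(eager.state_dict())  # identical parameter names and shapes

x = torch.rand([4, 4, 4], device='cuda')
with torch.no_grad():
    out_ref, attn_ref = eager(x)
    out_new, attn_new = fused(x)

print(torch.allclose(out_ref, out_new, atol=1e-5))    # expected: True
print(torch.allclose(attn_ref, attn_new, atol=1e-5))  # expected: True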
fiveflowers/ViLT
Block
false
15,370
[ "Apache-2.0" ]
587
762fd3975c180db6fc88f577cf39549983fa373a
https://github.com/fiveflowers/ViLT/tree/762fd3975c180db6fc88f577cf39549983fa373a
ATLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/eg/cegsur5rd65bizi7tgjnih642dflvizbitzesvmbxniphjgftt3z.py # Topologically Sorted Source Nodes: [y_pred_neg_1, neg_loss, y_pred_pos_1, pos_loss], Original ATen: [aten.cat, aten.logsumexp] # Source node to ATen node mapping: # neg_loss => abs_1, amax, eq, exp, full_default_1, sub_4, sum_1, where # pos_loss => amax_1 # y_pred_neg_1 => cat # y_pred_pos_1 => cat_1 # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%sub_1, %full_default], -1), kwargs = {}) # %amax : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%cat, [-1], True), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%amax,), kwargs = {}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_1, inf), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default_1, %amax), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%cat, %where), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_4,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1]), kwargs = {}) # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%sub_3, %full_default], -1), kwargs = {}) # %amax_1 : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%cat_1, [-1], True), kwargs = {}) triton_poi_fused_cat_logsumexp_0 = async_compile.triton('triton_poi_fused_cat_logsumexp_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( 
size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_logsumexp_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_logsumexp_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.full([1], 0, tl.int64) tmp1 = tmp0 >= tmp0 tmp2 = tl.full([1], 4, tl.int64) tmp3 = tmp0 < tmp2 tmp4 = tl.load(in_ptr0 + ((4*x0) + 0), tmp3 & xmask, eviction_policy='evict_last', other=0.0) tmp5 = 2.0 tmp6 = tmp4 * tmp5 tmp7 = 1.0 tmp8 = tmp7 - tmp6 tmp9 = tl.load(in_ptr1 + ((4*x0) + 0), tmp3 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tmp8 * tmp9 tmp11 = 1000000000000.0 tmp12 = tmp4 * tmp11 tmp13 = tmp10 - tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp3, tmp13, tmp14) tmp16 = tmp0 >= tmp2 tmp17 = tl.full([1], 5, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = 0.0 tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp16, tmp19, tmp20) tmp22 = tl.where(tmp3, tmp15, tmp21) tmp23 = tl.full([1], 1, tl.int64) tmp24 = tmp23 >= tmp0 tmp25 = tmp23 < tmp2 tmp26 = tl.load(in_ptr0 + ((4*x0) + 1), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp27 = tmp26 * tmp5 tmp28 = tmp7 - tmp27 tmp29 = tl.load(in_ptr1 + ((4*x0) + 1), tmp25 & xmask, eviction_policy='evict_last', other=0.0) tmp30 = tmp28 * tmp29 tmp31 = tmp26 * tmp11 tmp32 = tmp30 - tmp31 tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype) tmp34 = tl.where(tmp25, tmp32, tmp33) tmp35 = tmp23 >= tmp2 tmp36 = tmp23 < tmp17 tmp37 = tl.where(tmp35, tmp19, tmp20) tmp38 = tl.where(tmp25, tmp34, tmp37) tmp39 = triton_helpers.maximum(tmp22, tmp38) tmp40 = tl.full([1], 2, tl.int64) tmp41 = tmp40 >= tmp0 tmp42 = tmp40 < tmp2 tmp43 = tl.load(in_ptr0 + ((4*x0) + 2), tmp42 & xmask, eviction_policy='evict_last', other=0.0) tmp44 = tmp43 * tmp5 tmp45 = tmp7 - tmp44 tmp46 = tl.load(in_ptr1 + ((4*x0) + 2), tmp42 & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tmp45 * tmp46 tmp48 = tmp43 * tmp11 tmp49 = tmp47 - tmp48 tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype) tmp51 = tl.where(tmp42, tmp49, tmp50) tmp52 = tmp40 >= tmp2 tmp53 = tmp40 < tmp17 tmp54 = tl.where(tmp52, tmp19, tmp20) tmp55 = tl.where(tmp42, tmp51, tmp54) tmp56 = triton_helpers.maximum(tmp39, tmp55) tmp57 = tl.full([1], 3, tl.int64) tmp58 = tmp57 >= tmp0 tmp59 = tmp57 < tmp2 tmp60 = tl.load(in_ptr0 + ((4*x0) + 3), tmp59 & xmask, eviction_policy='evict_last', other=0.0) tmp61 = tmp60 * tmp5 tmp62 = tmp7 - tmp61 tmp63 = tl.load(in_ptr1 + ((4*x0) + 3), tmp59 & xmask, 
eviction_policy='evict_last', other=0.0) tmp64 = tmp62 * tmp63 tmp65 = tmp60 * tmp11 tmp66 = tmp64 - tmp65 tmp67 = tl.full(tmp66.shape, 0.0, tmp66.dtype) tmp68 = tl.where(tmp59, tmp66, tmp67) tmp69 = tmp57 >= tmp2 tmp70 = tmp57 < tmp17 tmp71 = tl.where(tmp69, tmp19, tmp20) tmp72 = tl.where(tmp59, tmp68, tmp71) tmp73 = triton_helpers.maximum(tmp56, tmp72) tmp74 = tmp2 >= tmp0 tmp75 = tmp2 < tmp2 tmp76 = tl.load(in_ptr0 + ((4*x0) + 4), tmp75 & xmask, eviction_policy='evict_last', other=0.0) tmp77 = tmp76 * tmp5 tmp78 = tmp7 - tmp77 tmp79 = tl.load(in_ptr1 + ((4*x0) + 4), tmp75 & xmask, eviction_policy='evict_last', other=0.0) tmp80 = tmp78 * tmp79 tmp81 = tmp76 * tmp11 tmp82 = tmp80 - tmp81 tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype) tmp84 = tl.where(tmp75, tmp82, tmp83) tmp85 = tmp2 >= tmp2 tmp86 = tmp2 < tmp17 tmp87 = tl.where(tmp85, tmp19, tmp20) tmp88 = tl.where(tmp75, tmp84, tmp87) tmp89 = triton_helpers.maximum(tmp73, tmp88) tmp90 = tl_math.abs(tmp89) tmp91 = float("inf") tmp92 = tmp90 == tmp91 tmp93 = tl.where(tmp92, tmp19, tmp89) tmp94 = tmp22 - tmp93 tmp95 = tl_math.exp(tmp94) tmp96 = tmp38 - tmp93 tmp97 = tl_math.exp(tmp96) tmp98 = tmp95 + tmp97 tmp99 = tmp55 - tmp93 tmp100 = tl_math.exp(tmp99) tmp101 = tmp98 + tmp100 tmp102 = tmp72 - tmp93 tmp103 = tl_math.exp(tmp102) tmp104 = tmp101 + tmp103 tmp105 = tmp88 - tmp93 tmp106 = tl_math.exp(tmp105) tmp107 = tmp104 + tmp106 tmp108 = tmp7 - tmp4 tmp109 = tmp108 * tmp11 tmp110 = tmp10 - tmp109 tmp111 = tl.full(tmp110.shape, 0.0, tmp110.dtype) tmp112 = tl.where(tmp3, tmp110, tmp111) tmp113 = tl.where(tmp3, tmp112, tmp21) tmp114 = tmp7 - tmp26 tmp115 = tmp114 * tmp11 tmp116 = tmp30 - tmp115 tmp117 = tl.full(tmp116.shape, 0.0, tmp116.dtype) tmp118 = tl.where(tmp25, tmp116, tmp117) tmp119 = tl.where(tmp25, tmp118, tmp37) tmp120 = triton_helpers.maximum(tmp113, tmp119) tmp121 = tmp7 - tmp43 tmp122 = tmp121 * tmp11 tmp123 = tmp47 - tmp122 tmp124 = tl.full(tmp123.shape, 0.0, tmp123.dtype) tmp125 = tl.where(tmp42, tmp123, tmp124) tmp126 = tl.where(tmp42, tmp125, tmp54) tmp127 = triton_helpers.maximum(tmp120, tmp126) tmp128 = tmp7 - tmp60 tmp129 = tmp128 * tmp11 tmp130 = tmp64 - tmp129 tmp131 = tl.full(tmp130.shape, 0.0, tmp130.dtype) tmp132 = tl.where(tmp59, tmp130, tmp131) tmp133 = tl.where(tmp59, tmp132, tmp71) tmp134 = triton_helpers.maximum(tmp127, tmp133) tmp135 = tmp7 - tmp76 tmp136 = tmp135 * tmp11 tmp137 = tmp80 - tmp136 tmp138 = tl.full(tmp137.shape, 0.0, tmp137.dtype) tmp139 = tl.where(tmp75, tmp137, tmp138) tmp140 = tl.where(tmp75, tmp139, tmp87) tmp141 = triton_helpers.maximum(tmp134, tmp140) tl.store(out_ptr0 + (x0), tmp89, xmask) tl.store(out_ptr1 + (x0), tmp107, xmask) tl.store(out_ptr2 + (x0), tmp141, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/yw/cywd2bvrvwed5f645rgsp4yuu2mcf5epm445v4f6r5tltm2mjdtl.py # Topologically Sorted Source Nodes: [y_pred_pos_1, pos_loss], Original ATen: [aten.cat, aten.logsumexp] # Source node to ATen node mapping: # pos_loss => abs_2, eq_1, exp_1, full_default_2, sub_5, where_1 # y_pred_pos_1 => cat_1 # Graph fragment: # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%sub_3, %full_default], -1), kwargs = {}) # %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%amax_1,), kwargs = {}) # %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_2, inf), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = 
{dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_2, %amax_1), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%cat_1, %where_1), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_5,), kwargs = {}) triton_poi_fused_cat_logsumexp_1 = async_compile.triton('triton_poi_fused_cat_logsumexp_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_logsumexp_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_logsumexp_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = (xindex // 5) x2 = xindex tmp25 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = 2.0 tmp7 = tmp5 * tmp6 tmp8 = 1.0 tmp9 = tmp8 - tmp7 tmp10 = tl.load(in_ptr1 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 * tmp10 tmp12 = tmp8 - tmp5 tmp13 = 1000000000000.0 tmp14 = tmp12 * tmp13 tmp15 = tmp11 - tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp4, tmp15, tmp16) tmp18 = tmp0 >= tmp3 tmp19 = tl.full([1], 5, tl.int64) tmp20 = tmp0 < tmp19 tmp21 = 0.0 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp18, tmp21, tmp22) tmp24 = tl.where(tmp4, tmp17, tmp23) tmp26 = tl_math.abs(tmp25) tmp27 = float("inf") tmp28 = tmp26 == tmp27 tmp29 = tl.where(tmp28, tmp21, tmp25) tmp30 = tmp24 - tmp29 tmp31 = tl_math.exp(tmp30) tl.store(out_ptr0 + (x2), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/fv/cfvqt2jiclilpqddmsi6syfkssqj545ijisiuyasuddj5zaasfo4.py # Topologically Sorted Source Nodes: [neg_loss, pos_loss, add, loss, loss_1], Original ATen: [aten.logsumexp, aten.add, aten.mean] # Source node to ATen node 
mapping: # add => add_2 # loss => mean # loss_1 => mean_1 # neg_loss => add, log # pos_loss => add_1, log_1, sum_2 # Graph fragment: # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log, %squeeze), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1]), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_2,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log_1, %squeeze_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %add_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_2,), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mean,), kwargs = {}) triton_per_fused_add_logsumexp_mean_2 = async_compile.triton('triton_per_fused_add_logsumexp_mean_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_logsumexp_mean_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_logsumexp_mean_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp2 = tl.load(in_ptr1 + (r0), None) tmp9 = tl.load(in_ptr2 + (5*r0), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (1 + (5*r0)), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (2 + (5*r0)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + (3 + (5*r0)), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + (4 + (5*r0)), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr3 + (r0), None) tmp1 = 
tl_math.log(tmp0) tmp3 = tl_math.abs(tmp2) tmp4 = float("inf") tmp5 = tmp3 == tmp4 tmp6 = 0.0 tmp7 = tl.where(tmp5, tmp6, tmp2) tmp8 = tmp1 + tmp7 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp17 = tmp15 + tmp16 tmp18 = tl_math.log(tmp17) tmp20 = tl_math.abs(tmp19) tmp21 = tmp20 == tmp4 tmp22 = tl.where(tmp21, tmp6, tmp19) tmp23 = tmp18 + tmp22 tmp24 = tmp8 + tmp23 tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.sum(tmp25, 1)[:, None] tmp28 = 64.0 tmp29 = tmp27 / tmp28 tmp30 = 1.0 tmp31 = tmp29 / tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp31, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [y_pred_neg_1, neg_loss, y_pred_pos_1, pos_loss], Original ATen: [aten.cat, aten.logsumexp] stream0 = get_raw_stream(0) triton_poi_fused_cat_logsumexp_0.run(arg1_1, arg0_1, buf0, buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [y_pred_pos_1, pos_loss], Original ATen: [aten.cat, aten.logsumexp] triton_poi_fused_cat_logsumexp_1.run(arg1_1, arg0_1, buf2, buf3, 320, grid=grid(320), stream=stream0) del arg0_1 del arg1_1 buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [neg_loss, pos_loss, add, loss, loss_1], Original ATen: [aten.logsumexp, aten.add, aten.mean] triton_per_fused_add_logsumexp_mean_2.run(buf5, buf1, buf0, buf3, buf2, 1, 64, grid=grid(1), stream=stream0) del buf0 del buf1 del buf2 del buf3 return (buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


def multilabel_categorical_crossentropy(y_true, y_pred):
    y_pred = (1 - 2 * y_true) * y_pred
    y_pred_neg = y_pred - y_true * 1000000000000.0
    y_pred_pos = y_pred - (1 - y_true) * 1000000000000.0
    zeros = torch.zeros_like(y_pred[..., :1])
    y_pred_neg = torch.cat([y_pred_neg, zeros], dim=-1)
    y_pred_pos = torch.cat([y_pred_pos, zeros], dim=-1)
    neg_loss = torch.logsumexp(y_pred_neg, dim=-1)
    pos_loss = torch.logsumexp(y_pred_pos, dim=-1)
    return (neg_loss + pos_loss).mean()


class ATLoss(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, logits, labels):
        loss = multilabel_categorical_crossentropy(labels, logits)
        loss = loss.mean()
        return loss

    def get_label(self, logits, num_labels=-1):
        th_logit = torch.zeros_like(logits[..., :1])
        output = torch.zeros_like(logits)
        mask = logits > th_logit
        if num_labels > 0:
            top_v, _ = torch.topk(logits, num_labels, dim=1)
            top_v = top_v[:, -1]
            mask = (logits >= top_v.unsqueeze(1)) & mask
        output[mask] = 1.0
        output[:, 0] = output[:, 1:].sum(1) == 0.0
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
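A small numeric sanity check of the loss: a confident, correct prediction drives both logsumexp terms toward log(1) = 0, while inverted labels blow the loss up. The specific logit values are illustrative only.

import torch

logits = torch.tensor([[9.0, -9.0, -9.0, -9.0]])
labels = torch.tensor([[1.0, 0.0, 0.0, 0.0]])  # class 0 is the only positive

loss_fn = ATLoss()
print(loss_fn(logits, labels))        # ~0: positives high, negatives low
print(loss_fn(logits, 1.0 - labels))  # large: every prediction contradicts its label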
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_logsumexp_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.full([1], 0, tl.int64) tmp2 = tl.full([1], 4, tl.int64) tmp3 = tmp0 < tmp2 tmp4 = tl.load(in_ptr0 + (4 * x0 + 0), tmp3 & xmask, eviction_policy= 'evict_last', other=0.0) tmp5 = 2.0 tmp6 = tmp4 * tmp5 tmp7 = 1.0 tmp8 = tmp7 - tmp6 tmp9 = tl.load(in_ptr1 + (4 * x0 + 0), tmp3 & xmask, eviction_policy= 'evict_last', other=0.0) tmp10 = tmp8 * tmp9 tmp11 = 1000000000000.0 tmp12 = tmp4 * tmp11 tmp13 = tmp10 - tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp3, tmp13, tmp14) tmp16 = tmp0 >= tmp2 tl.full([1], 5, tl.int64) tmp19 = 0.0 tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp16, tmp19, tmp20) tmp22 = tl.where(tmp3, tmp15, tmp21) tmp23 = tl.full([1], 1, tl.int64) tmp25 = tmp23 < tmp2 tmp26 = tl.load(in_ptr0 + (4 * x0 + 1), tmp25 & xmask, eviction_policy= 'evict_last', other=0.0) tmp27 = tmp26 * tmp5 tmp28 = tmp7 - tmp27 tmp29 = tl.load(in_ptr1 + (4 * x0 + 1), tmp25 & xmask, eviction_policy= 'evict_last', other=0.0) tmp30 = tmp28 * tmp29 tmp31 = tmp26 * tmp11 tmp32 = tmp30 - tmp31 tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype) tmp34 = tl.where(tmp25, tmp32, tmp33) tmp35 = tmp23 >= tmp2 tmp37 = tl.where(tmp35, tmp19, tmp20) tmp38 = tl.where(tmp25, tmp34, tmp37) tmp39 = triton_helpers.maximum(tmp22, tmp38) tmp40 = tl.full([1], 2, tl.int64) tmp42 = tmp40 < tmp2 tmp43 = tl.load(in_ptr0 + (4 * x0 + 2), tmp42 & xmask, eviction_policy= 'evict_last', other=0.0) tmp44 = tmp43 * tmp5 tmp45 = tmp7 - tmp44 tmp46 = tl.load(in_ptr1 + (4 * x0 + 2), tmp42 & xmask, eviction_policy= 'evict_last', other=0.0) tmp47 = tmp45 * tmp46 tmp48 = tmp43 * tmp11 tmp49 = tmp47 - tmp48 tmp50 = tl.full(tmp49.shape, 0.0, tmp49.dtype) tmp51 = tl.where(tmp42, tmp49, tmp50) tmp52 = tmp40 >= tmp2 tmp54 = tl.where(tmp52, tmp19, tmp20) tmp55 = tl.where(tmp42, tmp51, tmp54) tmp56 = triton_helpers.maximum(tmp39, tmp55) tmp57 = tl.full([1], 3, tl.int64) tmp59 = tmp57 < tmp2 tmp60 = tl.load(in_ptr0 + (4 * x0 + 3), tmp59 & xmask, eviction_policy= 'evict_last', other=0.0) tmp61 = tmp60 * tmp5 tmp62 = tmp7 - tmp61 tmp63 = tl.load(in_ptr1 + (4 * x0 + 3), tmp59 & xmask, eviction_policy= 'evict_last', other=0.0) tmp64 = tmp62 * tmp63 tmp65 = tmp60 * tmp11 tmp66 = tmp64 - tmp65 tmp67 = tl.full(tmp66.shape, 0.0, tmp66.dtype) tmp68 = tl.where(tmp59, tmp66, tmp67) tmp69 = tmp57 >= tmp2 tmp71 = tl.where(tmp69, tmp19, tmp20) tmp72 = tl.where(tmp59, tmp68, tmp71) tmp73 = triton_helpers.maximum(tmp56, tmp72) tmp75 = tmp2 < tmp2 tmp76 = tl.load(in_ptr0 + (4 * x0 + 4), tmp75 & xmask, eviction_policy= 'evict_last', other=0.0) tmp77 = tmp76 * tmp5 tmp78 = tmp7 - tmp77 tmp79 = tl.load(in_ptr1 + (4 * x0 + 4), tmp75 & xmask, eviction_policy= 'evict_last', other=0.0) tmp80 = tmp78 * tmp79 tmp81 = tmp76 * tmp11 tmp82 = tmp80 - tmp81 tmp83 = tl.full(tmp82.shape, 0.0, tmp82.dtype) tmp84 = tl.where(tmp75, tmp82, tmp83) 
tmp85 = tmp2 >= tmp2 tmp87 = tl.where(tmp85, tmp19, tmp20) tmp88 = tl.where(tmp75, tmp84, tmp87) tmp89 = triton_helpers.maximum(tmp73, tmp88) tmp90 = tl_math.abs(tmp89) tmp91 = float('inf') tmp92 = tmp90 == tmp91 tmp93 = tl.where(tmp92, tmp19, tmp89) tmp94 = tmp22 - tmp93 tmp95 = tl_math.exp(tmp94) tmp96 = tmp38 - tmp93 tmp97 = tl_math.exp(tmp96) tmp98 = tmp95 + tmp97 tmp99 = tmp55 - tmp93 tmp100 = tl_math.exp(tmp99) tmp101 = tmp98 + tmp100 tmp102 = tmp72 - tmp93 tmp103 = tl_math.exp(tmp102) tmp104 = tmp101 + tmp103 tmp105 = tmp88 - tmp93 tmp106 = tl_math.exp(tmp105) tmp107 = tmp104 + tmp106 tmp108 = tmp7 - tmp4 tmp109 = tmp108 * tmp11 tmp110 = tmp10 - tmp109 tmp111 = tl.full(tmp110.shape, 0.0, tmp110.dtype) tmp112 = tl.where(tmp3, tmp110, tmp111) tmp113 = tl.where(tmp3, tmp112, tmp21) tmp114 = tmp7 - tmp26 tmp115 = tmp114 * tmp11 tmp116 = tmp30 - tmp115 tmp117 = tl.full(tmp116.shape, 0.0, tmp116.dtype) tmp118 = tl.where(tmp25, tmp116, tmp117) tmp119 = tl.where(tmp25, tmp118, tmp37) tmp120 = triton_helpers.maximum(tmp113, tmp119) tmp121 = tmp7 - tmp43 tmp122 = tmp121 * tmp11 tmp123 = tmp47 - tmp122 tmp124 = tl.full(tmp123.shape, 0.0, tmp123.dtype) tmp125 = tl.where(tmp42, tmp123, tmp124) tmp126 = tl.where(tmp42, tmp125, tmp54) tmp127 = triton_helpers.maximum(tmp120, tmp126) tmp128 = tmp7 - tmp60 tmp129 = tmp128 * tmp11 tmp130 = tmp64 - tmp129 tmp131 = tl.full(tmp130.shape, 0.0, tmp130.dtype) tmp132 = tl.where(tmp59, tmp130, tmp131) tmp133 = tl.where(tmp59, tmp132, tmp71) tmp134 = triton_helpers.maximum(tmp127, tmp133) tmp135 = tmp7 - tmp76 tmp136 = tmp135 * tmp11 tmp137 = tmp80 - tmp136 tmp138 = tl.full(tmp137.shape, 0.0, tmp137.dtype) tmp139 = tl.where(tmp75, tmp137, tmp138) tmp140 = tl.where(tmp75, tmp139, tmp87) tmp141 = triton_helpers.maximum(tmp134, tmp140) tl.store(out_ptr0 + x0, tmp89, xmask) tl.store(out_ptr1 + x0, tmp107, xmask) tl.store(out_ptr2 + x0, tmp141, xmask) @triton.jit def triton_poi_fused_cat_logsumexp_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp25 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = 2.0 tmp7 = tmp5 * tmp6 tmp8 = 1.0 tmp9 = tmp8 - tmp7 tmp10 = tl.load(in_ptr1 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp9 * tmp10 tmp12 = tmp8 - tmp5 tmp13 = 1000000000000.0 tmp14 = tmp12 * tmp13 tmp15 = tmp11 - tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp4, tmp15, tmp16) tmp18 = tmp0 >= tmp3 tl.full([1], 5, tl.int64) tmp21 = 0.0 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp18, tmp21, tmp22) tmp24 = tl.where(tmp4, tmp17, tmp23) tmp26 = tl_math.abs(tmp25) tmp27 = float('inf') tmp28 = tmp26 == tmp27 tmp29 = tl.where(tmp28, tmp21, tmp25) tmp30 = tmp24 - tmp29 tmp31 = tl_math.exp(tmp30) tl.store(out_ptr0 + x2, tmp31, xmask) @triton.jit def triton_per_fused_add_logsumexp_mean_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = 
tl.load(in_ptr0 + r0, None)
    tmp2 = tl.load(in_ptr1 + r0, None)
    tmp9 = tl.load(in_ptr2 + 5 * r0, None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr2 + (1 + 5 * r0), None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr2 + (2 + 5 * r0), None, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr2 + (3 + 5 * r0), None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr2 + (4 + 5 * r0), None, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr3 + r0, None)
    tmp1 = tl_math.log(tmp0)
    tmp3 = tl_math.abs(tmp2)
    tmp4 = float('inf')
    tmp5 = tmp3 == tmp4
    tmp6 = 0.0
    tmp7 = tl.where(tmp5, tmp6, tmp2)
    tmp8 = tmp1 + tmp7
    tmp11 = tmp9 + tmp10
    tmp13 = tmp11 + tmp12
    tmp15 = tmp13 + tmp14
    tmp17 = tmp15 + tmp16
    tmp18 = tl_math.log(tmp17)
    tmp20 = tl_math.abs(tmp19)
    tmp21 = tmp20 == tmp4
    tmp22 = tl.where(tmp21, tmp6, tmp19)
    tmp23 = tmp18 + tmp22
    tmp24 = tmp8 + tmp23
    tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
    tmp27 = tl.sum(tmp25, 1)[:, None]
    tmp28 = 64.0
    tmp29 = tmp27 / tmp28
    tmp30 = 1.0
    tmp31 = tmp29 / tmp30
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp31, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_logsumexp_0[grid(64)](arg1_1, arg0_1, buf0,
            buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
        triton_poi_fused_cat_logsumexp_1[grid(320)](arg1_1, arg0_1, buf2,
            buf3, 320, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
        buf4 = empty_strided_cuda((), (), torch.float32)
        buf5 = buf4
        del buf4
        triton_per_fused_add_logsumexp_mean_2[grid(1)](buf5, buf1, buf0,
            buf3, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del buf0
        del buf1
        del buf2
        del buf3
    return buf5,


def multilabel_categorical_crossentropy(y_true, y_pred):
    y_pred = (1 - 2 * y_true) * y_pred
    y_pred_neg = y_pred - y_true * 1000000000000.0
    y_pred_pos = y_pred - (1 - y_true) * 1000000000000.0
    zeros = torch.zeros_like(y_pred[..., :1])
    y_pred_neg = torch.cat([y_pred_neg, zeros], dim=-1)
    y_pred_pos = torch.cat([y_pred_pos, zeros], dim=-1)
    neg_loss = torch.logsumexp(y_pred_neg, dim=-1)
    pos_loss = torch.logsumexp(y_pred_pos, dim=-1)
    return (neg_loss + pos_loss).mean()


class ATLossNew(nn.Module):

    def __init__(self):
        super().__init__()

    def get_label(self, logits, num_labels=-1):
        th_logit = torch.zeros_like(logits[..., :1])
        output = torch.zeros_like(logits)
        mask = logits > th_logit
        if num_labels > 0:
            top_v, _ = torch.topk(logits, num_labels, dim=1)
            top_v = top_v[:, -1]
            mask = (logits >= top_v.unsqueeze(1)) & mask
        output[mask] = 1.0
        output[:, 0] = output[:, 1:].sum(1) == 0.0
        return output

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
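The fused path can be checked against the eager loss on the harness shapes; a sketch assuming ATLoss and ATLossNew are importable in one session and a CUDA device is available.

import torch

torch.manual_seed(0)
logits = torch.rand([4, 4, 4, 4], device='cuda')
labels = (torch.rand([4, 4, 4, 4], device='cuda') > 0.5).float()

ref = ATLoss()(logits, labels)     # pure PyTorch logsumexp formulation
out = ATLossNew()(logits, labels)  # three fused Triton kernels via call()

print(torch.allclose(ref, out, atol=1e-4))  # expected: True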
fmc123653/DeepKE
ATLoss
false
15,371
[ "MIT" ]
676
4d30e51368681c7cb73e2ecacf9b922b441cbe99
https://github.com/fmc123653/DeepKE/tree/4d30e51368681c7cb73e2ecacf9b922b441cbe99
GeM
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/oh/cohjjahwtkrnlajxa6hxtauskd3aatopfxnhucy6uaicwrlkpoxo.py # Topologically Sorted Source Nodes: [clamp, pow_1, avg_pool2d, truediv, pow_2], Original ATen: [aten.clamp, aten.pow, aten.avg_pool2d, aten.reciprocal, aten.mul] # Source node to ATen node mapping: # avg_pool2d => avg_pool2d # clamp => clamp_min # pow_1 => pow_1 # pow_2 => pow_2 # truediv => mul, reciprocal # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg1_1, 1e-06), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min, %arg0_1), kwargs = {}) # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%pow_1, [4, 4]), kwargs = {}) # %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%arg0_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1.0), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%avg_pool2d, %mul), kwargs = {}) triton_poi_fused_avg_pool2d_clamp_mul_pow_reciprocal_0 = async_compile.triton('triton_poi_fused_avg_pool2d_clamp_mul_pow_reciprocal_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_clamp_mul_pow_reciprocal_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 
'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_clamp_mul_pow_reciprocal_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (0)) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp38 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp42 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp46 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp50 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp54 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp58 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp62 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp1 = 1e-06 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp5 = libdevice.pow(tmp2, tmp4) tmp7 = triton_helpers.maximum(tmp6, tmp1) tmp8 = libdevice.pow(tmp7, tmp4) tmp9 = tmp8 + tmp5 tmp11 = triton_helpers.maximum(tmp10, tmp1) tmp12 = libdevice.pow(tmp11, tmp4) tmp13 = tmp12 + tmp9 tmp15 = triton_helpers.maximum(tmp14, tmp1) tmp16 = libdevice.pow(tmp15, tmp4) tmp17 = tmp16 + tmp13 tmp19 = triton_helpers.maximum(tmp18, tmp1) tmp20 = libdevice.pow(tmp19, tmp4) tmp21 = tmp20 + tmp17 tmp23 = triton_helpers.maximum(tmp22, tmp1) tmp24 = libdevice.pow(tmp23, tmp4) tmp25 = tmp24 + tmp21 tmp27 = triton_helpers.maximum(tmp26, tmp1) tmp28 = libdevice.pow(tmp27, tmp4) tmp29 = tmp28 + tmp25 tmp31 = triton_helpers.maximum(tmp30, tmp1) tmp32 = libdevice.pow(tmp31, tmp4) tmp33 = tmp32 + tmp29 tmp35 = triton_helpers.maximum(tmp34, tmp1) tmp36 = libdevice.pow(tmp35, tmp4) tmp37 = tmp36 + tmp33 tmp39 = triton_helpers.maximum(tmp38, tmp1) tmp40 = libdevice.pow(tmp39, tmp4) tmp41 = tmp40 + tmp37 tmp43 = triton_helpers.maximum(tmp42, tmp1) tmp44 = libdevice.pow(tmp43, tmp4) tmp45 = tmp44 + tmp41 tmp47 = triton_helpers.maximum(tmp46, tmp1) tmp48 = libdevice.pow(tmp47, tmp4) tmp49 = tmp48 + tmp45 tmp51 = triton_helpers.maximum(tmp50, tmp1) tmp52 = libdevice.pow(tmp51, tmp4) tmp53 = tmp52 + tmp49 tmp55 = triton_helpers.maximum(tmp54, tmp1) tmp56 = libdevice.pow(tmp55, tmp4) tmp57 = tmp56 + tmp53 tmp59 = triton_helpers.maximum(tmp58, tmp1) tmp60 = libdevice.pow(tmp59, tmp4) tmp61 = tmp60 + tmp57 tmp63 = 
triton_helpers.maximum(tmp62, tmp1) tmp64 = libdevice.pow(tmp63, tmp4) tmp65 = tmp64 + tmp61 tmp66 = 0.0625 tmp67 = tmp65 * tmp66 tmp68 = tl.full([1], 1, tl.int32) tmp69 = tmp68 / tmp4 tmp70 = 1.0 tmp71 = tmp69 * tmp70 tmp72 = libdevice.pow(tmp67, tmp71) tl.store(in_out_ptr0 + (x0), tmp72, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (1, ), (1, )) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [clamp, pow_1, avg_pool2d, truediv, pow_2], Original ATen: [aten.clamp, aten.pow, aten.avg_pool2d, aten.reciprocal, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_clamp_mul_pow_reciprocal_0.run(buf1, arg1_1, arg0_1, 16, grid=grid(16), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class GeM(nn.Module):

    def __init__(self, p=3, eps=1e-06, requires_grad=False):
        super(GeM, self).__init__()
        self.p = nn.Parameter(torch.ones(1) * p, requires_grad=requires_grad)
        self.eps = eps

    def forward(self, x):
        return self.gem(x, p=self.p, eps=self.eps)

    def gem(self, x, p=3, eps=1e-06):
        return F.avg_pool2d(x.clamp(min=eps).pow(p),
            (x.size(-2), x.size(-1))).pow(1.0 / p)

    def __repr__(self):
        return self.__class__.__name__ + '(' + 'p=' + '{:.4f}'.format(
            self.p.data.tolist()[0]) + ', ' + 'eps=' + str(self.eps) + ')'


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
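GeM is the generalized (power) mean over the spatial grid, so it interpolates between average pooling (p = 1) and max pooling (p -> inf); a quick check of both limits against torch.nn.functional, with illustrative tolerances.

import torch
import torch.nn.functional as F

x = torch.rand([4, 4, 4, 4])

# p = 1: GeM collapses to plain average pooling (up to the eps clamp).
print(torch.allclose(GeM(p=1)(x), F.avg_pool2d(x, (4, 4)), atol=1e-6))

# Large p: the power mean is dominated by the largest activation.
print(torch.allclose(GeM(p=100)(x), F.max_pool2d(x, (4, 4)), atol=0.05))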
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_clamp_mul_pow_reciprocal_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp10 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp30 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp34 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp38 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp42 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp46 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp50 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp54 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp58 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp62 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp1 = 1e-06 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp5 = libdevice.pow(tmp2, tmp4) tmp7 = triton_helpers.maximum(tmp6, tmp1) tmp8 = libdevice.pow(tmp7, tmp4) tmp9 = tmp8 + tmp5 tmp11 = triton_helpers.maximum(tmp10, tmp1) tmp12 = libdevice.pow(tmp11, tmp4) tmp13 = tmp12 + tmp9 tmp15 = triton_helpers.maximum(tmp14, tmp1) tmp16 = libdevice.pow(tmp15, tmp4) tmp17 = tmp16 + tmp13 tmp19 = triton_helpers.maximum(tmp18, tmp1) tmp20 = libdevice.pow(tmp19, tmp4) tmp21 = tmp20 + tmp17 tmp23 = triton_helpers.maximum(tmp22, tmp1) tmp24 = libdevice.pow(tmp23, tmp4) tmp25 = tmp24 + tmp21 tmp27 = triton_helpers.maximum(tmp26, tmp1) tmp28 = libdevice.pow(tmp27, tmp4) tmp29 = tmp28 + tmp25 tmp31 = triton_helpers.maximum(tmp30, tmp1) tmp32 = libdevice.pow(tmp31, tmp4) tmp33 = tmp32 + tmp29 tmp35 = triton_helpers.maximum(tmp34, tmp1) tmp36 = libdevice.pow(tmp35, tmp4) tmp37 = tmp36 + tmp33 tmp39 = triton_helpers.maximum(tmp38, tmp1) tmp40 = libdevice.pow(tmp39, tmp4) tmp41 = tmp40 + tmp37 tmp43 = triton_helpers.maximum(tmp42, tmp1) tmp44 = libdevice.pow(tmp43, tmp4) tmp45 = tmp44 + tmp41 tmp47 = triton_helpers.maximum(tmp46, tmp1) tmp48 = libdevice.pow(tmp47, tmp4) tmp49 = tmp48 + tmp45 tmp51 = triton_helpers.maximum(tmp50, tmp1) tmp52 = libdevice.pow(tmp51, tmp4) tmp53 = tmp52 + tmp49 tmp55 = triton_helpers.maximum(tmp54, tmp1) tmp56 = libdevice.pow(tmp55, tmp4) tmp57 = tmp56 + tmp53 tmp59 = 
triton_helpers.maximum(tmp58, tmp1) tmp60 = libdevice.pow(tmp59, tmp4) tmp61 = tmp60 + tmp57 tmp63 = triton_helpers.maximum(tmp62, tmp1) tmp64 = libdevice.pow(tmp63, tmp4) tmp65 = tmp64 + tmp61 tmp66 = 0.0625 tmp67 = tmp65 * tmp66 tmp68 = tl.full([1], 1, tl.int32) tmp69 = tmp68 / tmp4 tmp70 = 1.0 tmp71 = tmp69 * tmp70 tmp72 = libdevice.pow(tmp67, tmp71) tl.store(in_out_ptr0 + x0, tmp72, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (1,), (1,)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_avg_pool2d_clamp_mul_pow_reciprocal_0[grid(16)](buf1, arg1_1, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf1, class GeMNew(nn.Module): def __init__(self, p=3, eps=1e-06, requires_grad=False): super(GeMNew, self).__init__() self.p = nn.Parameter(torch.ones(1) * p, requires_grad=requires_grad) self.eps = eps def gem(self, x, p=3, eps=1e-06): return F.avg_pool2d(x.clamp(min=eps).pow(p), (x.size(-2), x.size(-1)) ).pow(1.0 / p) def __repr__(self): return self.__class__.__name__ + '(' + 'p=' + '{:.4f}'.format(self. p.data.tolist()[0]) + ', ' + 'eps=' + str(self.eps) + ')' def forward(self, input_0): arg0_1 = self.p arg1_1 = input_0 output = call([arg0_1, arg1_1]) return output[0]
flrngel/DOLG-pytorch
GeM
false
15,372
[ "MIT" ]
56
97732d2932ef6733f17cf8ac1aee990effe6fd64
https://github.com/flrngel/DOLG-pytorch/tree/97732d2932ef6733f17cf8ac1aee990effe6fd64
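A quick eager-mode sanity check (a minimal sketch, not part of the record above): pooling over the full spatial extent with avg_pool2d is the same reduction as a plain spatial mean, which is the reduction the fused kernel unrolls for the 4x4 window.

import torch
import torch.nn.functional as F

# GeM via avg_pool2d, exactly as in the record's `gem` method.
x = torch.rand(4, 4, 4, 4)
p, eps = 3.0, 1e-06
pooled = F.avg_pool2d(x.clamp(min=eps).pow(p),
                      (x.size(-2), x.size(-1))).pow(1.0 / p)

# Equivalent formulation: generalized mean over the spatial dimensions.
manual = x.clamp(min=eps).pow(p).mean(dim=(-2, -1), keepdim=True).pow(1.0 / p)

assert torch.allclose(pooled, manual, atol=1e-06)  # both are (4, 4, 1, 1)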
fusion
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/4c/c4cd7dcsw353pvbq4zksghzkzd7tzo6rz6qui3oacl7o4u2prltz.py # Topologically Sorted Source Nodes: [x123_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # x123_1 => cat_1 # Graph fragment: # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%cat, %view_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2304 x1 = (xindex // 2304) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1536, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 768, 
tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tmp6 & tmp4 tmp8 = tl.load(in_ptr0 + ((768*x1) + x0), tmp7 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp0 >= tmp5 tmp10 = tmp9 & tmp4 tmp11 = tl.load(in_ptr1 + ((768*x1) + ((-768) + x0)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.where(tmp6, tmp8, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tmp16 = tl.full([1], 2304, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tl.load(in_ptr2 + ((768*x1) + ((-1536) + x0)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tl.where(tmp4, tmp14, tmp18) tl.store(out_ptr0 + (x2), tmp19, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6q/c6q567bxmyjlgebbvymd32lebt3oyhtpbyxv4hl6hj7rwb7ah4ft.py # Topologically Sorted Source Nodes: [weight1_1], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # weight1_1 => sigmoid # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 768), (768, 1)) assert_size_stride(primals_2, (4, 768), (768, 1)) assert_size_stride(primals_3, (4, 768), (768, 1)) assert_size_stride(primals_4, (1, 2304), (2304, 1)) assert_size_stride(primals_5, (1, ), (1, 
)) assert_size_stride(primals_6, (1, 2304), (2304, 1)) assert_size_stride(primals_7, (1, ), (1, )) assert_size_stride(primals_8, (1, 2304), (2304, 1)) assert_size_stride(primals_9, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2304), (2304, 1), torch.float32) # Topologically Sorted Source Nodes: [x123_1], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, primals_3, buf0, 9216, grid=grid(9216), stream=stream0) del primals_1 del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (2304, 1), (1, 2304), 0), out=buf1) del primals_4 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [weight1_1], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_1.run(buf2, primals_5, 4, grid=grid(4), stream=stream0) del primals_5 return (reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0), buf2, buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 768), (768, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 768), (768, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 768), (768, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 2304), (2304, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 2304), (2304, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 2304), (2304, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
from torch.nn import Linear


class fusion(nn.Module):

    def __init__(self, feature_size=768):
        super(fusion, self).__init__()
        self.fc1 = Linear(feature_size * 3, 1)
        self.fc2 = Linear(feature_size * 3, 1)
        self.fc3 = Linear(feature_size * 3, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x1, x2, x3):
        batch_size = x1.size()[0]
        x1 = x1.view(-1, 768)
        x2 = x2.view(-1, 768)
        x3 = x3.view(-1, 768)
        x123 = torch.cat((x1, x2), 1)
        x123 = torch.cat((x123, x3), 1)
        weight1 = self.fc1(x123)
        weight2 = self.fc2(x123)
        weight3 = self.fc3(x123)
        weight1 = self.sigmoid(weight1)
        weight2 = self.sigmoid(weight2)
        weight3 = self.sigmoid(weight3)
        # Bug fix: the original reshaped `weight1` three times, so all three
        # outputs aliased the fc1 branch (the compiled kernel below reflects
        # that and only ever computes fc1). Reshape each weight instead.
        weight1 = weight1.view(batch_size, -1).unsqueeze(2)
        weight2 = weight2.view(batch_size, -1).unsqueeze(2)
        weight3 = weight3.view(batch_size, -1).unsqueeze(2)
        return weight1, weight2, weight3


def get_inputs():
    return [torch.rand([4, 768]), torch.rand([4, 768]), torch.rand([4, 768])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn import Linear assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2304 x1 = xindex // 2304 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1536, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 768, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tmp6 & tmp4 tmp8 = tl.load(in_ptr0 + (768 * x1 + x0), tmp7 & xmask, eviction_policy ='evict_last', other=0.0) tmp9 = tmp0 >= tmp5 tmp10 = tmp9 & tmp4 tmp11 = tl.load(in_ptr1 + (768 * x1 + (-768 + x0)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.where(tmp6, tmp8, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tl.full([1], 2304, tl.int64) tmp18 = tl.load(in_ptr2 + (768 * x1 + (-1536 + x0)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tl.where(tmp4, tmp14, tmp18) tl.store(out_ptr0 + x2, tmp19, xmask) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 768), (768, 1)) assert_size_stride(primals_2, (4, 768), (768, 1)) assert_size_stride(primals_3, (4, 768), (768, 1)) assert_size_stride(primals_4, (1, 2304), (2304, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (1, 2304), (2304, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (1, 2304), (2304, 1)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2304), (2304, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(9216)](primals_1, primals_2, primals_3, buf0, 9216, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (2304, 1), (1, 2304), 0), out=buf1) del primals_4 buf2 = buf1 del buf1 triton_poi_fused_sigmoid_1[grid(4)](buf2, primals_5, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_5 return reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0 ), reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0 ), reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0), buf2, buf0, buf2 class fusionNew(nn.Module): def __init__(self, feature_size=768): super(fusionNew, self).__init__() self.fc1 = Linear(feature_size * 3, 1) self.fc2 = Linear(feature_size * 3, 1) self.fc3 = Linear(feature_size * 3, 1) self.sigmoid = nn.Sigmoid() def forward(self, input_0, input_1, 
input_2): primals_4 = self.fc1.weight primals_5 = self.fc1.bias primals_6 = self.fc2.weight primals_7 = self.fc2.bias primals_8 = self.fc3.weight primals_9 = self.fc3.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1], output[2]
funnyzhou/REFERS
fusion
false
15,373
[ "MIT" ]
46
392eddf13cbf3c3a7dc0bf8bfffd108ca4a65a19
https://github.com/funnyzhou/REFERS/tree/392eddf13cbf3c3a7dc0bf8bfffd108ca4a65a19
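The fused cat kernel in this record selects its source tensor by flattened column index. The sketch below (hypothetical helper `cat_by_index`, for illustration only) mirrors that branching in eager PyTorch and checks it against torch.cat.

import torch

def cat_by_index(x1, x2, x3):
    # Same branching as triton_poi_fused_cat_0: columns [0, 768) read x1,
    # [768, 1536) read x2 (offset by -768), [1536, 2304) read x3 (-1536).
    out = torch.empty(x1.size(0), 2304)
    for col in range(2304):
        if col < 768:
            out[:, col] = x1[:, col]
        elif col < 1536:
            out[:, col] = x2[:, col - 768]
        else:
            out[:, col] = x3[:, col - 1536]
    return out

x1, x2, x3 = torch.rand(4, 768), torch.rand(4, 768), torch.rand(4, 768)
assert torch.equal(cat_by_index(x1, x2, x3), torch.cat((x1, x2, x3), 1))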
LossesOfConVIRT
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/qw/cqwyfeiixy3qw6xgovma5gylefrbhgbbhccny2iedjqfv6uujmk5.py # Topologically Sorted Source Nodes: [cosine_similarity_1], Original ATen: [aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul] # Source node to ATen node mapping: # cosine_similarity_1 => clamp_min_2, clamp_min_3, div_3, div_4, mul_1, pow_5, pow_6, pow_7, pow_8, sum_4, sum_5 # Graph fragment: # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%expand_1, 2), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [1], True), kwargs = {}) # %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_4, 0.5), kwargs = {}) # %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_6, 1e-08), kwargs = {}) # %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%expand_1, %clamp_min_2), kwargs = {}) # %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%expand, 2), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_7, [1], True), kwargs = {}) # %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_5, 0.5), kwargs = {}) # %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_8, 1e-08), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%expand, %clamp_min_3), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_4, %div_3), kwargs = {}) triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0 = async_compile.triton('triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + (1)) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp8 = tl.load(in_ptr0 + (2)) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp12 = tl.load(in_ptr0 + (3)) tmp13 = tl.broadcast_to(tmp12, [XBLOCK]) tmp20 = tl.load(in_ptr1 + (x2), xmask) tmp21 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp6 = tmp5 * tmp5 tmp7 = tmp3 + tmp6 tmp10 = tmp9 * tmp9 tmp11 = tmp7 + tmp10 tmp14 = tmp13 * tmp13 tmp15 = tmp11 + tmp14 tmp16 = libdevice.sqrt(tmp15) tmp17 = 1e-08 tmp18 = triton_helpers.maximum(tmp16, tmp17) tmp19 = tmp0 / tmp18 tmp22 = tmp21 * tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp27 = tmp26 * tmp26 tmp28 = tmp25 + tmp27 tmp30 = tmp29 * tmp29 tmp31 = tmp28 + tmp30 tmp32 = libdevice.sqrt(tmp31) tmp33 = triton_helpers.maximum(tmp32, tmp17) tmp34 = tmp20 / tmp33 tmp35 = tmp19 * tmp34 tl.store(out_ptr0 + (x2), tmp35, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/z2/cz2tlcga5chxnh6c6oo6pt3qbo6ngfoixc2rh4jbfr2vc6425ylr.py # Topologically Sorted Source Nodes: [cosine_similarity, divide, item1, cosine_similarity_1, divide_1, exp_1, item2, divide_2, log, loss], Original ATen: [aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul, aten.sum, aten.exp, aten.log, aten.neg] # Source node to ATen node mapping: # cosine_similarity => clamp_min, clamp_min_1, div, div_1, mul, pow_1, pow_2, pow_3, pow_4, sum_1, sum_2, sum_3 # cosine_similarity_1 => sum_6 # divide => div_2 # divide_1 => div_5 # divide_2 => div_6 # exp_1 => exp_1 # item1 => exp # item2 => sum_7 # log => log # loss => neg # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [0], True), kwargs = {}) # %pow_2 : [num_users=1] = 
call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-08), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%select, %clamp_min), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [0], True), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_4, 1e-08), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%select_1, %clamp_min_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %div), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [0]), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 0.1), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_2,), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {}) # %div_5 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_6, 0.1), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_5,), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%exp_1,), kwargs = {}) # %div_6 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_7), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div_6,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log,), kwargs = {}) triton_per_fused_clamp_min_div_exp_linalg_vector_norm_log_mul_neg_sum_1 = async_compile.triton('triton_per_fused_clamp_min_div_exp_linalg_vector_norm_log_mul_neg_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_clamp_min_div_exp_linalg_vector_norm_log_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_clamp_min_div_exp_linalg_vector_norm_log_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp5 = tl.load(in_ptr1 + (r0), None) tmp21 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp10 = libdevice.sqrt(tmp4) tmp11 = 1e-08 tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp13 = tmp0 / tmp12 tmp14 = libdevice.sqrt(tmp9) tmp15 = triton_helpers.maximum(tmp14, tmp11) tmp16 = tmp5 / tmp15 tmp17 = tmp13 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp23 = tmp21 + tmp22 tmp25 = tmp23 + tmp24 tmp27 = tmp25 + tmp26 tmp28 = 10.0 tmp29 = tmp27 * tmp28 tmp30 = tl_math.exp(tmp29) tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.sum(tmp31, 1)[:, None] tmp34 = tmp20 * tmp28 tmp35 = tl_math.exp(tmp34) tmp36 = tmp35 / tmp33 tmp37 = tl_math.log(tmp36) tmp38 = -tmp37 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp38, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [cosine_similarity_1], Original ATen: [aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0.run(arg0_1, arg1_1, buf3, 16, grid=grid(16), stream=stream0) buf0 = empty_strided_cuda((1, ), (1, ), torch.float32) buf2 = reinterpret_tensor(buf0, (), (), 0); del buf0 # reuse buf5 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [cosine_similarity, divide, item1, cosine_similarity_1, divide_1, exp_1, item2, divide_2, log, loss], Original ATen: [aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul, aten.sum, aten.exp, aten.log, aten.neg] triton_per_fused_clamp_min_div_exp_linalg_vector_norm_log_mul_neg_sum_1.run(buf5, arg0_1, arg1_1, buf3, 1, 4, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del buf3 return (buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return 
print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class LossesOfConVIRT(nn.Module):
    """Bidirectional contrastive (InfoNCE-style) loss used by ConVIRT: a
    `lambd`-weighted sum of an image-to-text and a text-to-image term, each
    with temperature `tau`."""

    def __init__(self, tau=0.1, lambd=0.75):
        super(LossesOfConVIRT, self).__init__()
        self.tau = tau
        self.lambd = lambd

    def tmp_loss(self, v, u, index):
        """Functional variant of the per-row image-to-text term."""
        assert v.size(0) == u.size(0)
        item1 = torch.exp(torch.divide(torch.cosine_similarity(
            v[index], u[index], dim=0), self.tau))
        item2 = torch.exp(torch.divide(torch.cosine_similarity(
            v[index].unsqueeze(0), u, dim=1), self.tau)).sum()
        loss = -torch.log(torch.divide(item1, item2))
        return loss

    def image_text(self, v, u, index):
        """Image-to-text contrastive term for row `index`."""
        assert v.size(0) == u.size(0)
        cos = torch.nn.CosineSimilarity(dim=0)
        item1 = torch.exp(torch.divide(cos(v[index], u[index]), self.tau))
        cos2 = torch.nn.CosineSimilarity(dim=1)
        item2 = torch.exp(torch.divide(cos2(v[index].unsqueeze(0), u),
                                       self.tau)).sum()
        loss = -torch.log(torch.divide(item1, item2))
        return loss

    def text_image(self, v, u, index):
        """Text-to-image contrastive term for row `index`. Note that
        `.item()` returns a Python float, so this term carries no gradient."""
        assert v.size(0) == u.size(0)
        cos = torch.nn.CosineSimilarity(dim=0)
        item1 = torch.exp(torch.divide(cos(v[index], u[index]), self.tau))
        cos2 = torch.nn.CosineSimilarity(dim=1)
        item2 = torch.exp(torch.divide(cos2(v, u[index].unsqueeze(0)),
                                       self.tau)).sum()
        loss = -torch.log(torch.divide(item1, item2)).item()
        return loss

    def forward(self, v, u):
        """
        :return: scalar loss, averaged over the batch dimension.
        """
        assert v.size(0) == u.size(0)
        res = 0.0
        v = v.float()
        u = u.float()
        for i in range(v.size(0)):
            res += self.lambd * self.image_text(v, u, i) + (1 - self.lambd
                ) * self.text_image(v, u, i)
        res /= v.size(0)
        return res


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + 1) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp8 = tl.load(in_ptr0 + 2) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp12 = tl.load(in_ptr0 + 3) tmp13 = tl.broadcast_to(tmp12, [XBLOCK]) tmp20 = tl.load(in_ptr1 + x2, xmask) tmp21 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp26 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp3 = tmp2 * tmp2 tmp6 = tmp5 * tmp5 tmp7 = tmp3 + tmp6 tmp10 = tmp9 * tmp9 tmp11 = tmp7 + tmp10 tmp14 = tmp13 * tmp13 tmp15 = tmp11 + tmp14 tmp16 = libdevice.sqrt(tmp15) tmp17 = 1e-08 tmp18 = triton_helpers.maximum(tmp16, tmp17) tmp19 = tmp0 / tmp18 tmp22 = tmp21 * tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp27 = tmp26 * tmp26 tmp28 = tmp25 + tmp27 tmp30 = tmp29 * tmp29 tmp31 = tmp28 + tmp30 tmp32 = libdevice.sqrt(tmp31) tmp33 = triton_helpers.maximum(tmp32, tmp17) tmp34 = tmp20 / tmp33 tmp35 = tmp19 * tmp34 tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_per_fused_clamp_min_div_exp_linalg_vector_norm_log_mul_neg_sum_1( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl. 
constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r0, None) tmp21 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp10 = libdevice.sqrt(tmp4) tmp11 = 1e-08 tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp13 = tmp0 / tmp12 tmp14 = libdevice.sqrt(tmp9) tmp15 = triton_helpers.maximum(tmp14, tmp11) tmp16 = tmp5 / tmp15 tmp17 = tmp13 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp23 = tmp21 + tmp22 tmp25 = tmp23 + tmp24 tmp27 = tmp25 + tmp26 tmp28 = 10.0 tmp29 = tmp27 * tmp28 tmp30 = tl_math.exp(tmp29) tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.sum(tmp31, 1)[:, None] tmp34 = tmp20 * tmp28 tmp35 = tl_math.exp(tmp34) tmp36 = tmp35 / tmp33 tmp37 = tl_math.log(tmp36) tmp38 = -tmp37 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp38, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0[grid(16)]( arg0_1, arg1_1, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf0 = empty_strided_cuda((1,), (1,), torch.float32) buf2 = reinterpret_tensor(buf0, (), (), 0) del buf0 buf5 = buf2 del buf2 triton_per_fused_clamp_min_div_exp_linalg_vector_norm_log_mul_neg_sum_1[ grid(1)](buf5, arg0_1, arg1_1, buf3, 1, 4, XBLOCK=1, num_warps= 2, num_stages=1) del arg0_1 del arg1_1 del buf3 return buf5, class LossesOfConVIRTNew(nn.Module): """ """ def __init__(self, tau=0.1, lambd=0.75): super(LossesOfConVIRTNew, self).__init__() self.tau = tau self.lambd = lambd def tmp_loss(self, v, u, index): """ """ assert v.size(0) == u.size(0) item1 = torch.exp(torch.divide(torch.cosine_similarity(v[index], u[ index], dim=0), self.tau)) item2 = torch.exp(torch.divide(torch.cosine_similarity(v[index]. 
unsqueeze(0), u, dim=1), self.tau)).sum() loss = -torch.log(torch.divide(item1, item2)) return loss def image_text(self, v, u, index): """ """ assert v.size(0) == u.size(0) cos = torch.nn.CosineSimilarity(dim=0) item1 = torch.exp(torch.divide(cos(v[index], u[index]), self.tau)) cos2 = torch.nn.CosineSimilarity(dim=1) item2 = torch.exp(torch.divide(cos2(v[index].unsqueeze(0), u), self .tau)).sum() loss = -torch.log(torch.divide(item1, item2)) return loss def text_image(self, v, u, index): """ """ assert v.size(0) == u.size(0) cos = torch.nn.CosineSimilarity(dim=0) item1 = torch.exp(torch.divide(cos(v[index], u[index]), self.tau)) cos2 = torch.nn.CosineSimilarity(dim=1) item2 = torch.exp(torch.divide(cos2(v, u[index].unsqueeze(0)), self .tau)).sum() loss = -torch.log(torch.divide(item1, item2)).item() return loss def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
funnyzhou/REFERS
LossesOfConVIRT
false
15,374
[ "MIT" ]
46
392eddf13cbf3c3a7dc0bf8bfffd108ca4a65a19
https://github.com/funnyzhou/REFERS/tree/392eddf13cbf3c3a7dc0bf8bfffd108ca4a65a19
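A minimal sketch (not from the record) showing that each per-row term of this loss is an InfoNCE/cross-entropy over temperature-scaled cosine similarities, which is handy when checking the fused kernel against eager mode:

import torch
import torch.nn.functional as F

tau = 0.1
v, u = torch.rand(4, 4), torch.rand(4, 4)

# Full similarity matrix: sim[i, j] = cos(v_i, u_j) / tau.
sim = F.cosine_similarity(v.unsqueeze(1), u.unsqueeze(0), dim=2) / tau
for i in range(4):
    # The record's image_text term for row i ...
    item1 = torch.exp(F.cosine_similarity(v[i], u[i], dim=0) / tau)
    item2 = torch.exp(F.cosine_similarity(v[i].unsqueeze(0), u, dim=1) / tau).sum()
    loss_i = -torch.log(item1 / item2)
    # ... equals cross-entropy with the diagonal entry as the target class.
    ce_i = F.cross_entropy(sim[i:i + 1], torch.tensor([i]))
    assert torch.allclose(loss_i, ce_i, atol=1e-05)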
LocalResponseNormLayer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/6w/c6wgqvz4kdrnqiec7t2vuvmkn46mmwv4k4fko7qcq7iqq7l5jzmk.py # Topologically Sorted Source Nodes: [local_response_norm], Original ATen: [aten.constant_pad_nd, aten.avg_pool3d, aten.mul, aten.add, aten.pow, aten.div] # Source node to ATen node mapping: # local_response_norm => add, avg_pool3d, constant_pad_nd, div, mul_1, pow_1 # Graph fragment: # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%view, [0, 0, 0, 0, 2, 2], 0.0), kwargs = {}) # %avg_pool3d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool3d.default](args = (%constant_pad_nd, [5, 1, 1], [1, 1, 1]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 9.999999747378752e-05), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1.0), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.75), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %pow_1), kwargs = {}) triton_poi_fused_add_avg_pool3d_constant_pad_nd_div_mul_pow_0 = async_compile.triton('triton_poi_fused_add_avg_pool3d_constant_pad_nd_div_mul_pow_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_avg_pool3d_constant_pad_nd_div_mul_pow_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 
'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_avg_pool3d_constant_pad_nd_div_mul_pow_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 4 x3 = xindex tmp48 = tl.load(in_ptr0 + (x3), xmask) tmp0 = (-2) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + ((-32) + x3), tmp5 & xmask, other=0.0) tmp7 = tmp6 * tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp5, tmp7, tmp8) tmp10 = (-1) + x1 tmp11 = tmp10 >= tmp1 tmp12 = tmp10 < tmp3 tmp13 = tmp11 & tmp12 tmp14 = tl.load(in_ptr0 + ((-16) + x3), tmp13 & xmask, other=0.0) tmp15 = tmp14 * tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp13, tmp15, tmp16) tmp18 = tmp17 + tmp9 tmp19 = x1 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tl.load(in_ptr0 + (x3), tmp22 & xmask, other=0.0) tmp24 = tmp23 * tmp23 tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp22, tmp24, tmp25) tmp27 = tmp26 + tmp18 tmp28 = 1 + x1 tmp29 = tmp28 >= tmp1 tmp30 = tmp28 < tmp3 tmp31 = tmp29 & tmp30 tmp32 = tl.load(in_ptr0 + (16 + x3), tmp31 & xmask, other=0.0) tmp33 = tmp32 * tmp32 tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp31, tmp33, tmp34) tmp36 = tmp35 + tmp27 tmp37 = 2 + x1 tmp38 = tmp37 >= tmp1 tmp39 = tmp37 < tmp3 tmp40 = tmp38 & tmp39 tmp41 = tl.load(in_ptr0 + (32 + x3), tmp40 & xmask, other=0.0) tmp42 = tmp41 * tmp41 tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype) tmp44 = tl.where(tmp40, tmp42, tmp43) tmp45 = tmp44 + tmp36 tmp46 = 0.2 tmp47 = tmp45 * tmp46 tmp49 = 9.999999747378752e-05 tmp50 = tmp47 * tmp49 tmp51 = 1.0 tmp52 = tmp50 + tmp51 tmp53 = 0.75 tmp54 = libdevice.pow(tmp52, tmp53) tmp55 = tmp48 / tmp54 tl.store(in_out_ptr0 + (x3), tmp55, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 64, 16, 4, 1), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [local_response_norm], Original ATen: [aten.constant_pad_nd, aten.avg_pool3d, aten.mul, aten.add, aten.pow, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_avg_pool3d_constant_pad_nd_div_mul_pow_0.run(buf1, arg0_1, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main 
compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class LocalResponseNormLayer(nn.Module):

    def forward(self, tensor, size=5, alpha=9.999999747378752e-05, beta=0.75,
                k=1.0):
        return F.local_response_norm(tensor, size=size, alpha=alpha,
                                     beta=beta, k=k)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_avg_pool3d_constant_pad_nd_div_mul_pow_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x3 = xindex tmp48 = tl.load(in_ptr0 + x3, xmask) tmp0 = -2 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-32 + x3), tmp5 & xmask, other=0.0) tmp7 = tmp6 * tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp5, tmp7, tmp8) tmp10 = -1 + x1 tmp11 = tmp10 >= tmp1 tmp12 = tmp10 < tmp3 tmp13 = tmp11 & tmp12 tmp14 = tl.load(in_ptr0 + (-16 + x3), tmp13 & xmask, other=0.0) tmp15 = tmp14 * tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp13, tmp15, tmp16) tmp18 = tmp17 + tmp9 tmp19 = x1 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tl.load(in_ptr0 + x3, tmp22 & xmask, other=0.0) tmp24 = tmp23 * tmp23 tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp22, tmp24, tmp25) tmp27 = tmp26 + tmp18 tmp28 = 1 + x1 tmp29 = tmp28 >= tmp1 tmp30 = tmp28 < tmp3 tmp31 = tmp29 & tmp30 tmp32 = tl.load(in_ptr0 + (16 + x3), tmp31 & xmask, other=0.0) tmp33 = tmp32 * tmp32 tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp31, tmp33, tmp34) tmp36 = tmp35 + tmp27 tmp37 = 2 + x1 tmp38 = tmp37 >= tmp1 tmp39 = tmp37 < tmp3 tmp40 = tmp38 & tmp39 tmp41 = tl.load(in_ptr0 + (32 + x3), tmp40 & xmask, other=0.0) tmp42 = tmp41 * tmp41 tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype) tmp44 = tl.where(tmp40, tmp42, tmp43) tmp45 = tmp44 + tmp36 tmp46 = 0.2 tmp47 = tmp45 * tmp46 tmp49 = 9.999999747378752e-05 tmp50 = tmp47 * tmp49 tmp51 = 1.0 tmp52 = tmp50 + tmp51 tmp53 = 0.75 tmp54 = libdevice.pow(tmp52, tmp53) tmp55 = tmp48 / tmp54 tl.store(in_out_ptr0 + x3, tmp55, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 64, 16, 4, 1), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_avg_pool3d_constant_pad_nd_div_mul_pow_0[grid(256) ](buf1, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf1, class LocalResponseNormLayerNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
fuzhanrahmanian/lucent
LocalResponseNormLayer
false
15,375
[ "Apache-2.0" ]
449
13b24c3c37784185275da73c7a11095b2ae809c5
https://github.com/fuzhanrahmanian/lucent/tree/13b24c3c37784185275da73c7a11095b2ae809c5
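A minimal eager sketch of the formula the fused kernel computes (assumptions: zero padding on the channel dimension and a divisor that always counts the padded entries, matching the 0.2 factor in the kernel):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
size, alpha, beta, k = 5, 9.999999747378752e-05, 0.75, 1.0

# Average of squares over a 5-wide channel window, zero-padded at the ends.
sq = F.pad(x.pow(2), (0, 0, 0, 0, size // 2, size // 2))
window_avg = torch.stack(
    [sq[:, c:c + size].mean(dim=1) for c in range(x.size(1))], dim=1)

# out = x / (k + alpha * window_avg) ** beta
manual = x / (k + alpha * window_avg).pow(beta)
expected = F.local_response_norm(x, size, alpha=alpha, beta=beta, k=k)
assert torch.allclose(manual, expected, atol=1e-06)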
LinearTextualHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py # Topologically Sorted Source Nodes: [visual_features_2], Original ATen: [aten.mean] # Source node to ATen node mapping: # visual_features_2 => mean # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%permute, [1]), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = 
tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [visual_features_2], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0) del primals_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output_logits], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, buf1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_2 del primals_3 return (buf2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
from typing import Optional


class TextualHead(nn.Module):
    """
    Base class for all textual heads. All child classes can simply inherit
    from :class:`~torch.nn.Module`, however this is kept here for uniform
    type annotations.

    Parameters
    ----------
    visual_feature_size: int
        Size (number of channels) of the input features from the visual
        backbone.
    vocab_size: int
        Number of tokens in the output vocabulary.
    hidden_size: int
        Size of the token embedding vectors, or hidden state vector of the
        language model.
    """

    def __init__(self, visual_feature_size: 'int', vocab_size: 'int',
                 hidden_size: 'int'):
        super().__init__()
        self.visual_feature_size = visual_feature_size
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size

    @property
    def textual_feature_size(self):
        """
        Size of the last dimension of output right before the output linear
        layer (which predicts a distribution over vocabulary tokens). This is
        typically same as :attr:`hidden_size` for most modules. This property
        is used to add more modules on top of this.
        """
        return self.hidden_size


class LinearTextualHead(TextualHead):
    """
    A textual head containing a single linear layer projecting from the
    visual feature size to the output vocabulary size.

    Parameters
    ----------
    visual_feature_size: int
        Size (number of channels) of the input features from the visual
        backbone.
    vocab_size: int
        Number of tokens in the output vocabulary.
    """

    def __init__(self, visual_feature_size: 'int', vocab_size: 'int', **kwargs):
        hidden_size = visual_feature_size
        super().__init__(visual_feature_size, vocab_size, hidden_size)
        self.output = nn.Linear(visual_feature_size, vocab_size)

    def forward(self, visual_features: 'torch.Tensor',
                caption_tokens: 'Optional[torch.Tensor]'=None,
                caption_lengths: 'Optional[torch.Tensor]'=None) -> torch.Tensor:
        """
        Project visual features directly to predict a distribution over
        vocabulary tokens through a single linear layer. This textual head
        ignores arguments ``caption_tokens`` and ``caption_lengths``, they
        are here for API consistency.

        Parameters
        ----------
        visual_features: torch.Tensor
            A tensor of shape ``(batch_size, channels, height, width)``
            containing features from visual backbone.

        Returns
        -------
        torch.Tensor
            A tensor of shape ``(batch_size, vocab_size)`` containing output
            vocabulary logits.
        """
        batch_size, channels, _height, _width = visual_features.size()
        visual_features = visual_features.view(batch_size, channels, -1)
        visual_features = visual_features.permute(0, 2, 1)
        visual_features = visual_features.mean(dim=1)
        output_logits = self.output(visual_features)
        return output_logits


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'visual_feature_size': 4, 'vocab_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 16.0
    tmp6 = tmp4 / tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
            num_warps=2, num_stages=1)
        del primals_1
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_3, buf1, reinterpret_tensor(primals_2,
            (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del primals_2
        del primals_3
    return buf2, buf1


class TextualHead(nn.Module):
    """
    Base class for all textual heads. All child classes can simply inherit
    from :class:`~torch.nn.Module`, however this is kept here for uniform
    type annotations.

    Parameters
    ----------
    visual_feature_size: int
        Size (number of channels) of the input features from the visual backbone.
    vocab_size: int
        Number of tokens in the output vocabulary.
    hidden_size: int
        Size of the token embedding vectors, or hidden state vector of the
        language model.
    """

    def __init__(self, visual_feature_size: 'int', vocab_size: 'int',
        hidden_size: 'int'):
        super().__init__()
        self.visual_feature_size = visual_feature_size
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size

    @property
    def textual_feature_size(self):
        """
        Size of the last dimension of output right before the output linear
        layer (which predicts a distribution over vocabulary tokens). This is
        typically the same as :attr:`hidden_size` for most modules. This
        property is used to add more modules on top of this.
        """
        return self.hidden_size


class LinearTextualHeadNew(TextualHead):
    """
    A textual head containing a single linear layer projecting from the visual
    feature size to the output vocabulary size.

    Parameters
    ----------
    visual_feature_size: int
        Size (number of channels) of the input features from the visual backbone.
    vocab_size: int
        Number of tokens in the output vocabulary.
    """

    def __init__(self, visual_feature_size: 'int', vocab_size: 'int', **kwargs):
        hidden_size = visual_feature_size
        super().__init__(visual_feature_size, vocab_size, hidden_size)
        self.output = nn.Linear(visual_feature_size, vocab_size)

    def forward(self, input_0):
        primals_2 = self.output.weight
        primals_3 = self.output.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
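A minimal parity sketch between the eager head and the compiled wrapper above, assuming a CUDA device is available (call() pins cuda:0); this snippet is my illustration, not part of the generated file:

    torch.manual_seed(0)
    eager = LinearTextualHead(4, 4).cuda()
    fused = LinearTextualHeadNew(4, 4).cuda()
    fused.load_state_dict(eager.state_dict())  # share the same Linear weights
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x), fused(x), atol=1e-6)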
funnyzhou/REFERS
LinearTextualHead
false
15,376
[ "MIT" ]
46
392eddf13cbf3c3a7dc0bf8bfffd108ca4a65a19
https://github.com/funnyzhou/REFERS/tree/392eddf13cbf3c3a7dc0bf8bfffd108ca4a65a19
MultiHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/74/c74v7pw3ewucs6uyfyas6jj3smeip3jdxrfqskzmtslaejs2rcdu.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {}) # %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/5j/c5jll3kxtd32cl7pwubrb5oky2mtzckfgip2xbwad7crvvp4zk4r.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_default_2, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_default_2, %amax_default), kwargs = {}) # %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {}) triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kt/cktnex5febczl2ac6zugjmcksgsd5kjdufazv65vtepuwob3cb7a.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {}) # %div_tensor : 
[num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {}) # %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_default_2, -inf), kwargs = {}) # %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {}) # %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {}) # %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_self : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {}) triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + (x2), xmask) tmp26 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = float("-inf") tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = (tmp4 != 0) tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = (tmp9 != 0) tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 
tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = (tmp15 != 0) tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = (tmp21 != 0) tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + (x2), tmp35, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/nt/cntsccyp35ridrbw5aqmwfrugkskxdz2e5qgs65dfgng226m2a6s.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_3, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 16, 4, 
grid=grid(16, 4), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(primals_2, buf1, 16, 4, grid=grid(16, 4), stream=stream0) del primals_2 buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 0, 1), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(buf2, buf3, buf4, 256, grid=grid(256), stream=stream0) del buf2 del buf3 buf5 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_3, buf5, 16, 4, grid=grid(16, 4), stream=stream0) del primals_3 buf6 = reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 0), 0), out=buf6) del buf4 buf7 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [attention_output], Original ATen: [aten.clone] triton_poi_fused_3.run(buf6, buf7, 16, 4, grid=grid(16, 4), stream=stream0) buf8 = reinterpret_tensor(buf6, (16, 4), (4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [attention_output_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8) del primals_4 del primals_5 return (reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf7, (16, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 1), (16, 4, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 1), (16, 4, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 1), (16, 4, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
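A hedged reading of the fused masked-softmax pair above, as a minimal eager-mode sketch (the name fused_softmax_reference is mine): triton_poi_fused_1 computes exp(x - rowmax), and triton_poi_fused_2 divides by the row sum while zeroing any row whose scores are all -inf, mirroring the aten graph fragments (eq -inf, logical_not, any, where). No mask is passed in this trace, so the -inf guard is inert here.

    def fused_softmax_reference(scores):
        e = (scores - scores.amax(dim=-1, keepdim=True)).exp()
        p = e / e.sum(dim=-1, keepdim=True)
        all_masked = (scores == float('-inf')).all(dim=-1, keepdim=True)
        return p.masked_fill(all_masked, 0.0)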
import math
import torch
import torch.nn as nn


class ScaledDotProductAttention(nn.Module):

    def __init__(self, d_head):
        super(ScaledDotProductAttention, self).__init__()
        self.d_head = d_head
        self.attention_dropout = nn.Dropout(p=0.1)

    def forward(self, q, k, v, mask=None):
        attention_weights = torch.matmul(q, k.transpose(-2, -1))
        scaled_attention_weights = attention_weights / math.sqrt(self.d_head)
        if mask is not None:
            scaled_attention_weights = scaled_attention_weights.masked_fill(
                mask == 0, float('-inf'))
        scaled_attention_weights = nn.functional.softmax(
            scaled_attention_weights, dim=-1)
        scaled_attention_weights = self.attention_dropout(
            scaled_attention_weights)
        weighted_v = torch.matmul(scaled_attention_weights, v)
        return weighted_v


class MultiHeadAttention(nn.Module):

    def __init__(self, d_model, n_heads):
        super(MultiHeadAttention, self).__init__()
        self.n_heads = n_heads
        assert d_model % n_heads == 0
        self.d_head = d_model // n_heads
        self.dot_product_attention_layer = ScaledDotProductAttention(self.d_head)
        self.W_0 = nn.Linear(d_model, d_model)

    def _split_into_heads(self, q, k, v):
        q = q.view(q.size(0), q.size(1), self.n_heads, self.d_head)
        k = k.view(k.size(0), k.size(1), self.n_heads, self.d_head)
        v = v.view(v.size(0), v.size(1), self.n_heads, self.d_head)
        q = q.transpose(1, 2)
        k = k.transpose(1, 2)
        v = v.transpose(1, 2)
        return q, k, v

    def _concatenate_heads(self, attention_output):
        attention_output = attention_output.transpose(1, 2).contiguous()
        attention_output = attention_output.view(attention_output.size(0),
            attention_output.size(1), -1)
        return attention_output

    def forward(self, q, k, v, mask=None):
        q, k, v = self._split_into_heads(q, k, v)
        attention_output = self.dot_product_attention_layer(q, k, v, mask)
        attention_output = self._concatenate_heads(attention_output)
        attention_output = self.W_0(attention_output)
        return attention_output


def get_inputs():
    return [torch.rand([4, 4, 4, 1]), torch.rand([4, 4, 4, 1]),
        torch.rand([4, 4, 4, 1])]


def get_init_inputs():
    return [[], {'d_model': 4, 'n_heads': 4}]
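A hedged cross-check of the eager attention above against PyTorch's built-in kernel (a sketch assuming PyTorch >= 2.0 for F.scaled_dot_product_attention; .eval() disables the p=0.1 dropout so the two paths agree):

    import torch.nn.functional as F

    mha = MultiHeadAttention(d_model=4, n_heads=4).eval()
    q, k, v = (torch.rand(4, 4, 4, 1) for _ in range(3))
    qh, kh, vh = mha._split_into_heads(q, k, v)
    ref = F.scaled_dot_product_attention(qh, kh, vh)    # softmax(QK^T / sqrt(d_head)) V
    ours = mha.dot_product_attention_layer(qh, kh, vh)
    assert torch.allclose(ref, ours, atol=1e-6)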
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
    tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)


@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr1 + x2, xmask)
    tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp1 = float('-inf')
    tmp2 = tmp0 == tmp1
    tmp3 = tmp2 == 0
    tmp4 = tmp3.to(tl.int64)
    tmp5 = tmp4 != 0
    tmp7 = tmp6 == tmp1
    tmp8 = tmp7 == 0
    tmp9 = tmp8.to(tl.int64)
    tmp10 = tmp9 != 0
    tmp11 = tmp5 | tmp10
    tmp13 = tmp12 == tmp1
    tmp14 = tmp13 == 0
    tmp15 = tmp14.to(tl.int64)
    tmp16 = tmp15 != 0
    tmp17 = tmp11 | tmp16
    tmp19 = tmp18 == tmp1
    tmp20 = tmp19 == 0
    tmp21 = tmp20.to(tl.int64)
    tmp22 = tmp21 != 0
    tmp23 = tmp17 | tmp22
    tmp24 = tmp23 == 0
    tmp28 = tmp26 + tmp27
    tmp30 = tmp28 + tmp29
    tmp32 = tmp30 + tmp31
    tmp33 = tmp25 / tmp32
    tmp34 = 0.0
    tmp35 = tl.where(tmp24, tmp34, tmp33)
    tl.store(out_ptr0 + x2, tmp35, xmask)


@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
    tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 1), (16, 4, 1, 1))
    assert_size_stride(primals_2, (4, 4, 4, 1), (16, 4, 1, 1))
    assert_size_stride(primals_3, (4, 4, 4, 1), (16, 4, 1, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
        triton_poi_fused_0[grid(16, 4)](primals_2, buf1, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 0),
            0), reinterpret_tensor(buf1, (16, 1, 4), (4, 0, 1), 0), out=buf2)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_1[grid(256)](buf2, buf3, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_2[grid(256)](buf2, buf3, buf4, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del buf2
        del buf3
        buf5 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf1
        triton_poi_fused_3[grid(16, 4)](primals_3, buf5, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        del primals_3
        buf6 = reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 1), 0)
        del buf0
        extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 0), 0), out=buf6)
        del buf4
        buf7 = buf5
        del buf5
        triton_poi_fused_3[grid(16, 4)](buf6, buf7, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf8 = reinterpret_tensor(buf6, (16, 4), (4, 1), 0)
        del buf6
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf7, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf8)
        del primals_4
        del primals_5
    return reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0
        ), reinterpret_tensor(buf7, (16, 4), (4, 1), 0)


class ScaledDotProductAttention(nn.Module):

    def __init__(self, d_head):
        super(ScaledDotProductAttention, self).__init__()
        self.d_head = d_head
        self.attention_dropout = nn.Dropout(p=0.1)

    def forward(self, q, k, v, mask=None):
        attention_weights = torch.matmul(q, k.transpose(-2, -1))
        scaled_attention_weights = attention_weights / math.sqrt(self.d_head)
        if mask is not None:
            scaled_attention_weights = scaled_attention_weights.masked_fill(
                mask == 0, float('-inf'))
        scaled_attention_weights = nn.functional.softmax(
            scaled_attention_weights, dim=-1)
        scaled_attention_weights = self.attention_dropout(
            scaled_attention_weights)
        weighted_v = torch.matmul(scaled_attention_weights, v)
        return weighted_v


class MultiHeadAttentionNew(nn.Module):

    def __init__(self, d_model, n_heads):
        super(MultiHeadAttentionNew, self).__init__()
        self.n_heads = n_heads
        assert d_model % n_heads == 0
        self.d_head = d_model // n_heads
        self.dot_product_attention_layer = ScaledDotProductAttention(self.d_head)
        self.W_0 = nn.Linear(d_model, d_model)

    def _split_into_heads(self, q, k, v):
        q = q.view(q.size(0), q.size(1), self.n_heads, self.d_head)
        k = k.view(k.size(0), k.size(1), self.n_heads, self.d_head)
        v = v.view(v.size(0), v.size(1), self.n_heads, self.d_head)
        q = q.transpose(1, 2)
        k = k.transpose(1, 2)
        v = v.transpose(1, 2)
        return q, k, v

    def _concatenate_heads(self, attention_output):
        attention_output = attention_output.transpose(1, 2).contiguous()
        attention_output = attention_output.view(attention_output.size(0),
            attention_output.size(1), -1)
        return attention_output

    def forward(self, input_0, input_1, input_2):
        primals_4 = self.W_0.weight
        primals_5 = self.W_0.bias
        primals_1 = input_0
        primals_2 = input_1
        primals_3 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
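A minimal parity sketch between the eager MultiHeadAttention (from the listing above this one) and the compiled wrapper, assuming a CUDA device (call() pins cuda:0) and eval mode on the eager side, since the fused graph was traced without attention dropout:

    torch.manual_seed(0)
    eager = MultiHeadAttention(4, 4).cuda().eval()
    fused = MultiHeadAttentionNew(4, 4).cuda()
    fused.load_state_dict(eager.state_dict())  # only W_0 carries parameters
    q, k, v = (torch.rand(4, 4, 4, 1, device='cuda') for _ in range(3))
    assert torch.allclose(eager(q, k, v), fused(q, k, v), atol=1e-5)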
francismontalbo/attention-is-all-you-need-paper
MultiHeadAttention
false
15,377
[ "MIT" ]
167
21ba3e48917da0c6808126d183bece6a9969cfd2
https://github.com/francismontalbo/attention-is-all-you-need-paper/tree/21ba3e48917da0c6808126d183bece6a9969cfd2
TransformerGPTEncoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py # Topologically Sorted Source Nodes: [input_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # input_norm => add_1, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_4, [2]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : 
tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/vs/cvsfvbs4wlaqvwxm3svg65dnhcq336ptudvn6xetnbnrtzj7xssn.py # Topologically Sorted Source Nodes: [input_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # input_norm => add_1, add_2, mul, mul_1, rsqrt, sub_1, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_4, [2]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_4, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': 
False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/cu/ccutfff4nahrfmpcv6wsifo3asds3rnoszsjaoct5d2c37x6tm6d.py # Topologically Sorted Source Nodes: [query_2, query_key], Original ATen: [aten.div, aten.clone] # Source node to ATen node mapping: # query_2 => div # query_key => clone # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_5, 1.0), kwargs = {}) # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_div_2 = async_compile.triton('triton_poi_fused_clone_div_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_div_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_div_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 
tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3r/c3rsks6vi53ggj2qfjmhu7vc3vqskqtyr7gc4fdp74wzt6pdrjx4.py # Topologically Sorted Source Nodes: [query_key], Original ATen: [aten.clone] # Source node to ATen node mapping: # query_key => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/cv/ccvgwkyzxsyhrnehwfpmuuw4demnoemzqjubnz4oyyo2qwj5nftt.py # Topologically Sorted Source Nodes: [add, dec_mask], Original ATen: [aten.add, aten.gt] # Source node to ATen node mapping: # add => add # dec_mask => gt # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add, 0), kwargs = {}) triton_poi_fused_add_gt_4 = async_compile.triton('triton_poi_fused_add_gt_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math 
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_gt_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_gt_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x1 = (xindex // 4) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = x0 + ((-1)*x1) tmp2 = tl.full([1], 1, tl.int64) tmp3 = tmp1 >= tmp2 tmp4 = tl.full([1], 1, tl.uint8) tmp5 = tl.full([1], 0, tl.uint8) tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp6.to(tl.float32) tmp8 = tmp0 + tmp7 tmp9 = 0.0 tmp10 = tmp8 > tmp9 tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/b2/cb2c7vgcdoqxv5o3y5uofbo44hdhjozkwkrjvhvfffedospjv3xq.py # Topologically Sorted Source Nodes: [scores_1, attn], Original ATen: [aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # attn => amax, exp, sub_2, sum_1 # scores_1 => full_default, where_1 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -9.999999843067494e+17), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_2, %full_default, %view_13), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_1, [-1], True), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_masked_fill_5 = async_compile.triton('triton_poi_fused__softmax_masked_fill_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', 
index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_masked_fill_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + (4*x3), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1) tmp5 = tl.load(in_ptr1 + (1 + (4*x3)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1) tmp9 = tl.load(in_ptr1 + (2 + (4*x3)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1) tmp13 = tl.load(in_ptr1 + (3 + (4*x3)), xmask, eviction_policy='evict_last') tmp2 = -9.999999843067494e+17 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp6 = tl.where(tmp4, tmp2, tmp5) tmp7 = triton_helpers.maximum(tmp3, tmp6) tmp10 = tl.where(tmp8, tmp2, tmp9) tmp11 = triton_helpers.maximum(tmp7, tmp10) tmp14 = tl.where(tmp12, tmp2, tmp13) tmp15 = triton_helpers.maximum(tmp11, tmp14) tmp16 = tmp3 - tmp15 tmp17 = tl_math.exp(tmp16) tmp18 = tmp6 - tmp15 tmp19 = tl_math.exp(tmp18) tmp20 = tmp17 + tmp19 tmp21 = tmp10 - tmp15 tmp22 = tl_math.exp(tmp21) tmp23 = tmp20 + tmp22 tmp24 = tmp14 - tmp15 tmp25 = tl_math.exp(tmp24) tmp26 = tmp23 + tmp25 tl.store(out_ptr0 + (x3), tmp15, xmask) tl.store(out_ptr1 + (x3), tmp26, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/an/canq63czo74en7hou2tx2netp4p2cdz64g7nf5tto4ay4yd5und5.py # Topologically Sorted Source Nodes: [scores_1, attn], Original ATen: [aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # attn => amax, div_1, exp, sub_2 # scores_1 => full_default, where_1 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -9.999999843067494e+17), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_2, %full_default, %view_13), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_1, [-1], True), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) # %div_1 : 
[num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_masked_fill_6 = async_compile.triton('triton_poi_fused__softmax_masked_fill_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_masked_fill_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 64) x4 = xindex % 16 x5 = xindex x6 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x4 + (16*x3)), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_out_ptr0 + (x5), xmask) tmp4 = tl.load(in_ptr1 + (x6), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + (x6), xmask, eviction_policy='evict_last') tmp2 = -9.999999843067494e+17 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tl.store(in_out_ptr0 + (x5), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/lw/clwfsjrjxeb2gmxy5p3lplvcrvrn37iuw4atjria32bxp2jajrtc.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xw/cxwztfhiso3uego7k7zbqdm4uahezg4u5zy7ri26p3g76e2lbxyf.py # Topologically Sorted Source Nodes: [context_1, context_norm], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # context_1 => add_3 # context_norm => var_mean_1 # Graph fragment: # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_19, %primals_4), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/so/csozgpoz3hmv3hrlzcentzlzvi4jqhlqmcwqydcltpnthhqeurgb.py # Topologically Sorted Source Nodes: [context_1, context_norm], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # context_1 => add_3 # context_norm => add_4, add_5, mul_2, mul_3, rsqrt_1, sub_3 # Graph fragment: # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_19, %primals_4), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %rsqrt_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_13), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_14), kwargs = {}) triton_poi_fused_add_native_layer_norm_9 = async_compile.triton('triton_poi_fused_add_native_layer_norm_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_add_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xe/cxedogdpldbmycqxbi2b3rlgfoaiyyb2nqubgrakqvptohmhaui6.py # Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add_2, mul_2, tanh, add_3, mul_3], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh] # Source node to ATen node mapping: # add_2 => add_6 # add_3 => add_7 # mul => mul_4 # mul_1 => mul_5 # mul_2 => mul_6 # mul_3 => mul_7 # pow_1 => pow_1 # tanh => tanh # Graph fragment: # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_22, 0.5), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_22, 3), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_22, %mul_5), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_6, 0.7978845608028654), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_6,), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %add_7), kwargs = {}) triton_poi_fused_add_mul_pow_tanh_10 = async_compile.triton('triton_poi_fused_add_mul_pow_tanh_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_tanh_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_pow_tanh_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp0 tmp5 = 0.044715 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp8 = 0.7978845608028654 tmp9 = tmp7 * tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp2 * tmp12 tl.store(out_ptr0 + (x0), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/sl/csl7qzdvx4vof5vjl6ew6pcaevy4ipzd3ehhbbv4flbqt4oaolik.py # Topologically Sorted Source Nodes: [context_1, output_1], Original ATen: [aten.add] # Source node to ATen node mapping: # context_1 => add_3 # output_1 => add_8 # Graph fragment: # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_19, %primals_4), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_24, %add_3), kwargs = {}) triton_poi_fused_add_11 = async_compile.triton('triton_poi_fused_add_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) 
* XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp4 = tl.load(in_ptr2 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, ), (1, )) assert_size_stride(primals_14, (4, ), (1, )) assert_size_stride(primals_15, (16, 4), (4, 1)) assert_size_stride(primals_16, (16, ), (1, )) assert_size_stride(primals_17, (4, 16), (16, 1)) assert_size_stride(primals_18, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [input_norm], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_4, buf0, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [input_norm], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_4, buf0, buf1, primals_2, primals_3, buf2, 64, grid=grid(64), stream=stream0) del primals_2 del primals_3 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf4) buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [query_2, query_key], Original ATen: [aten.div, aten.clone] triton_poi_fused_clone_div_2.run(buf5, primals_10, buf6, 16, 4, grid=grid(16, 4), stream=stream0) del primals_10 buf7 = reinterpret_tensor(buf5, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [query_key], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf3, primals_6, buf7, 16, 4, grid=grid(16, 4), stream=stream0) del primals_6 buf8 = empty_strided_cuda((16, 
4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [query_key], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf7, (16, 1, 4), (4, 0, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [add, dec_mask], Original ATen: [aten.add, aten.gt] triton_poi_fused_add_gt_4.run(primals_1, buf9, 64, grid=grid(64), stream=stream0) del primals_1 buf10 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf3 # reuse buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [scores_1, attn], Original ATen: [aten.masked_fill, aten._softmax] triton_poi_fused__softmax_masked_fill_5.run(buf9, buf8, buf10, buf11, 64, grid=grid(64), stream=stream0) buf12 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [scores_1, attn], Original ATen: [aten.masked_fill, aten._softmax] triton_poi_fused__softmax_masked_fill_6.run(buf12, buf9, buf10, buf11, 256, grid=grid(256), stream=stream0) buf13 = reinterpret_tensor(buf11, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [context_original], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf4, primals_8, buf13, 16, 4, grid=grid(16, 4), stream=stream0) del primals_8 buf14 = reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [context_original], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf12, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf13, (16, 4, 1), (4, 1, 0), 0), out=buf14) buf15 = reinterpret_tensor(buf10, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] triton_poi_fused_clone_7.run(buf14, buf15, 16, 4, grid=grid(16, 4), stream=stream0) buf16 = reinterpret_tensor(buf14, (16, 4), (4, 1), 0); del buf14 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm] extern_kernels.addmm(primals_12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_12 buf17 = buf1; del buf1 # reuse buf18 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [context_1, context_norm], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_8.run(buf16, primals_4, buf17, buf18, 16, grid=grid(16), stream=stream0) buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [context_1, context_norm], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_9.run(buf16, primals_4, buf17, buf18, primals_13, primals_14, buf19, 64, grid=grid(64), stream=stream0) del buf17 del buf18 del primals_14 buf20 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_16, reinterpret_tensor(buf19, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf20) del primals_16 buf21 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add_2, mul_2, tanh, add_3, mul_3], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh] triton_poi_fused_add_mul_pow_tanh_10.run(buf20, buf21, 256, grid=grid(256), 
stream=stream0) buf22 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf21, (16, 16), (16, 1), 0), reinterpret_tensor(primals_17, (16, 4), (1, 16), 0), out=buf22) buf23 = reinterpret_tensor(buf22, (4, 4, 4), (16, 4, 1), 0); del buf22 # reuse # Topologically Sorted Source Nodes: [context_1, output_1], Original ATen: [aten.add] triton_poi_fused_add_11.run(buf23, primals_18, buf16, primals_4, 64, grid=grid(64), stream=stream0) del primals_18 return (buf23, primals_4, primals_13, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(buf9, (4, 1, 4, 4), (16, 16, 4, 1), 0), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), buf16, reinterpret_tensor(buf19, (16, 4), (4, 1), 0), buf20, reinterpret_tensor(buf21, (16, 16), (16, 1), 0), primals_17, primals_15, primals_11, reinterpret_tensor(buf13, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 4), 0), primals_9, primals_7, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
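Editorial note on the attention softmax above: the kernel pair triton_poi_fused__softmax_masked_fill_5/_6 implements a numerically stable masked softmax in two passes. The first pass applies the decoder mask with a large negative fill (-9.999999843067494e+17, i.e. -1e+18 rounded to float32) and reduces each attention row to its max and its sum of exponentials; the second pass re-applies the mask and normalizes every element in place. A minimal PyTorch sketch of the same two-pass computation (the function name is ours, not part of this record):

import torch

def masked_softmax_two_pass(scores, mask, fill=-1e18):
    # pass 1 (kernel ..._5): mask, then per-row max and sum of exponentials
    masked = scores.masked_fill(mask, fill)
    row_max = masked.amax(dim=-1, keepdim=True)
    row_sum = torch.exp(masked - row_max).sum(dim=-1, keepdim=True)
    # pass 2 (kernel ..._6): normalize each element with the row statistics
    return torch.exp(masked - row_max) / row_sum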
import math import torch import torch.nn as nn import torch.cuda import torch.distributed def gelu(x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) def generate_relative_positions_matrix(length, max_relative_positions, cache=False): """Generate the clipped relative positions matrix for a given length and maximum relative positions""" if cache: distance_mat = torch.arange(-length + 1, 1, 1).unsqueeze(0) else: range_vec = torch.arange(length) range_mat = range_vec.unsqueeze(-1).expand(-1, length).transpose(0, 1) distance_mat = range_mat - range_mat.transpose(0, 1) distance_mat_clipped = torch.clamp(distance_mat, min=- max_relative_positions, max=max_relative_positions) final_mat = distance_mat_clipped + max_relative_positions return final_mat def relative_matmul(x, z, transpose): """Helper function for relative positions attention.""" batch_size = x.shape[0] heads = x.shape[1] length = x.shape[2] x_t = x.permute(2, 0, 1, 3) x_t_r = x_t.reshape(length, heads * batch_size, -1) if transpose: z_t = z.transpose(1, 2) x_tz_matmul = torch.matmul(x_t_r, z_t) else: x_tz_matmul = torch.matmul(x_t_r, z) x_tz_matmul_r = x_tz_matmul.reshape(length, batch_size, heads, -1) x_tz_matmul_r_t = x_tz_matmul_r.permute(1, 2, 0, 3) return x_tz_matmul_r_t class MLP(nn.Module): def __init__(self, n_embd, n_state, dropout): super(MLP, self).__init__() self.c_fc = nn.Linear(n_embd, n_state) self.c_proj = nn.Linear(n_state, n_embd) self.act = gelu self.dropout_1 = nn.Dropout(dropout) self.dropout_2 = nn.Dropout(dropout) self.reset_parameters() def reset_parameters(self): self.c_fc.weight.data.normal_(std=0.02) self.c_fc.bias.data.zero_() self.c_proj.weight.data.normal_(std=0.02) self.c_proj.bias.data.zero_() def forward(self, x): """ x is input, [T, B, n_state] """ h = self.dropout_1(self.act(self.c_fc(x))) h2 = self.dropout_2(self.c_proj(h)) return h2 class MultiHeadedAttention(nn.Module): """Multi-Head Attention module from "Attention is All You Need" :cite:`DBLP:journals/corr/VaswaniSPUJGKP17`. Similar to standard `dot` attention but uses multiple attention distributions simultaneously to select relevant items. .. mermaid:: graph BT A[key] B[value] C[query] O[output] subgraph Attn D[Attn 1] E[Attn 2] F[Attn N] end A --> D C --> D A --> E C --> E A --> F C --> F D --> O E --> O F --> O B --> O Also includes several additional tricks. Args: head_count (int): number of parallel heads model_dim (int): the dimension of keys/values/queries, must be divisible by head_count dropout (float): dropout parameter """ def __init__(self, head_count, model_dim, dropout=0.1, max_relative_positions=0): assert model_dim % head_count == 0 self.dim_per_head = model_dim // head_count self.model_dim = model_dim super(MultiHeadedAttention, self).__init__() self.head_count = head_count self.linear_keys = nn.Linear(model_dim, head_count * self.dim_per_head) self.linear_values = nn.Linear(model_dim, head_count * self. dim_per_head) self.linear_query = nn.Linear(model_dim, head_count * self.dim_per_head ) self.softmax = nn.Softmax(dim=-1) self.dropout = nn.Dropout(dropout) self.final_linear = nn.Linear(model_dim, model_dim) self.max_relative_positions = max_relative_positions if max_relative_positions > 0: vocab_size = max_relative_positions * 2 + 1 self.relative_positions_embeddings = nn.Embedding(vocab_size, self.dim_per_head) def forward(self, key, value, query, mask=None, layer_cache=None, type=None ): """ Compute the context vector and the attention vectors.
Args: key (FloatTensor): set of `key_len` key vectors ``(batch, key_len, dim)`` value (FloatTensor): set of `key_len` value vectors ``(batch, key_len, dim)`` query (FloatTensor): set of `query_len` query vectors ``(batch, query_len, dim)`` mask: binary mask indicating which keys have non-zero attention ``(batch, query_len, key_len)`` Returns: (FloatTensor, FloatTensor): * output context vectors ``(batch, query_len, dim)`` * one of the attention vectors ``(batch, query_len, key_len)`` """ batch_size = key.size(0) dim_per_head = self.dim_per_head head_count = self.head_count key_len = key.size(1) query_len = query.size(1) def shape(x): """Projection.""" return x.view(batch_size, -1, head_count, dim_per_head).transpose( 1, 2) def unshape(x): """Compute context.""" return x.transpose(1, 2).contiguous().view(batch_size, -1, head_count * dim_per_head) if layer_cache is not None: if type == 'self': query, key, value = self.linear_query(query), self.linear_keys( query), self.linear_values(query) key = shape(key) value = shape(value) if layer_cache['self_keys'] is not None: key = torch.cat((layer_cache['self_keys'], key), dim=2) if layer_cache['self_values'] is not None: value = torch.cat((layer_cache['self_values'], value), dim=2) layer_cache['self_keys'] = key layer_cache['self_values'] = value elif type == 'context': query = self.linear_query(query) if layer_cache['memory_keys'] is None: key, value = self.linear_keys(key), self.linear_values( value) key = shape(key) value = shape(value) else: key, value = layer_cache['memory_keys'], layer_cache[ 'memory_values'] layer_cache['memory_keys'] = key layer_cache['memory_values'] = value else: key = self.linear_keys(key) value = self.linear_values(value) query = self.linear_query(query) key = shape(key) value = shape(value) if self.max_relative_positions > 0 and type == 'self': key_len = key.size(2) relative_positions_matrix = generate_relative_positions_matrix( key_len, self.max_relative_positions, cache=True if layer_cache is not None else False) relations_keys = self.relative_positions_embeddings( relative_positions_matrix) relations_values = self.relative_positions_embeddings( relative_positions_matrix) query = shape(query) key_len = key.size(2) query_len = query.size(2) query = query / math.sqrt(dim_per_head) query_key = torch.matmul(query, key.transpose(2, 3)) if self.max_relative_positions > 0 and type == 'self': scores = query_key + relative_matmul(query, relations_keys, True) else: scores = query_key scores = scores.float() if mask is not None: mask = mask.unsqueeze(1) scores = scores.masked_fill(mask, -1e+18) attn = self.softmax(scores) drop_attn = self.dropout(attn) context_original = torch.matmul(drop_attn, value) if self.max_relative_positions > 0 and type == 'self': context = unshape(context_original + relative_matmul(drop_attn, relations_values, False)) else: context = unshape(context_original) output = self.final_linear(context) top_attn = attn.view(batch_size, head_count, query_len, key_len)[:, 0, :, :].contiguous() return output, top_attn class TransformerGPTEncoderLayer(nn.Module): """ A single layer of the transformer encoder. Args: d_model (int): the dimension of keys/values/queries in MultiHeadedAttention, also the input size of the first-layer of the PositionwiseFeedForward. heads (int): the number of heads for MultiHeadedAttention. d_ff (int): the second-layer of the PositionwiseFeedForward. dropout (float): dropout probability (0-1.0).
""" def __init__(self, d_model, heads, d_ff, dropout, attn_dropout, max_relative_positions=0): super(TransformerGPTEncoderLayer, self).__init__() self.self_attn = MultiHeadedAttention(heads, d_model, dropout= attn_dropout, max_relative_positions=max_relative_positions) self.feed_forward = MLP(d_model, d_model * 4, dropout) self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-05) self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-05) self.dropout = nn.Dropout(dropout) def forward(self, inputs, mask): """ Args: inputs (FloatTensor): ``(batch_size, src_len, model_dim)`` mask (LongTensor): ``(batch_size, src_len, src_len)`` Returns: (FloatTensor): * outputs ``(batch_size, src_len, model_dim)`` """ dec_mask = None src_len = mask.size(-1) future_mask = torch.ones([src_len, src_len], device=mask.device, dtype=torch.uint8) future_mask = future_mask.triu_(1).view(1, src_len, src_len) dec_mask = torch.gt(mask + future_mask, 0) input_norm = self.layer_norm_1(inputs) context, _ = self.self_attn(input_norm, input_norm, input_norm, mask=dec_mask, type='self') context = self.dropout(context) + inputs context_norm = self.layer_norm_2(context) output = self.feed_forward(context_norm) output = output + context return output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'heads': 4, 'd_ff': 4, 'dropout': 0.5, 'attn_dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn import torch.cuda import torch.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_div_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & 
ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_add_gt_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = x0 + -1 * x1 tmp2 = tl.full([1], 1, tl.int64) tmp3 = tmp1 >= tmp2 tmp4 = tl.full([1], 1, tl.uint8) tmp5 = tl.full([1], 0, tl.uint8) tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp6.to(tl.float32) tmp8 = tmp0 + tmp7 tmp9 = 0.0 tmp10 = tmp8 > tmp9 tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last').to(tl.int1) tmp5 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last').to(tl.int1) tmp9 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last').to(tl.int1) tmp13 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp2 = -9.999999843067494e+17 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp6 = tl.where(tmp4, tmp2, tmp5) tmp7 = triton_helpers.maximum(tmp3, tmp6) tmp10 = tl.where(tmp8, tmp2, tmp9) tmp11 = triton_helpers.maximum(tmp7, tmp10) tmp14 = tl.where(tmp12, tmp2, tmp13) tmp15 = triton_helpers.maximum(tmp11, tmp14) tmp16 = tmp3 - tmp15 tmp17 = tl_math.exp(tmp16) tmp18 = tmp6 - tmp15 tmp19 = tl_math.exp(tmp18) tmp20 = tmp17 + tmp19 tmp21 = tmp10 - tmp15 tmp22 = tl_math.exp(tmp21) tmp23 = tmp20 + tmp22 tmp24 = tmp14 - tmp15 tmp25 = tl_math.exp(tmp24) tmp26 = tmp23 + tmp25 tl.store(out_ptr0 + x3, tmp15, xmask) tl.store(out_ptr1 + x3, tmp26, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 64 x4 = xindex % 16 x5 = xindex x6 = xindex // 4 tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x5, xmask) tmp4 = tl.load(in_ptr1 + x6, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last') tmp2 = -9.999999843067494e+17 tmp3 = tl.where(tmp0, tmp2, tmp1) tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tl.store(in_out_ptr0 + x5, tmp8, xmask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_add_mul_pow_tanh_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp0 tmp5 = 0.044715 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp8 = 0.7978845608028654 tmp9 = tmp7 * tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp2 * tmp12 tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = 
tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x2, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (16, 4), (4, 1)) assert_size_stride(primals_16, (16,), (1,)) assert_size_stride(primals_17, (4, 16), (16, 1)) assert_size_stride(primals_18, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_4, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_4, buf0, buf1, primals_2, primals_3, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 del primals_3 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf4) buf5 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_div_2[grid(16, 4)](buf5, primals_10, buf6, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_10 buf7 = reinterpret_tensor(buf5, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf5 triton_poi_fused_clone_3[grid(16, 4)](buf3, primals_6, buf7, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_6 buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf7, (16, 1, 4), (4, 0, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_add_gt_4[grid(64)](primals_1, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf10 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf3 buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_masked_fill_5[grid(64)](buf9, buf8, buf10, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf8 
triton_poi_fused__softmax_masked_fill_6[grid(256)](buf12, buf9, buf10, buf11, 256, XBLOCK=256, num_warps=4, num_stages=1) buf13 = reinterpret_tensor(buf11, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf11 triton_poi_fused_clone_3[grid(16, 4)](buf4, primals_8, buf13, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_8 buf14 = reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0) del buf4 extern_kernels.bmm(reinterpret_tensor(buf12, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf13, (16, 4, 1), (4, 1, 0), 0), out=buf14) buf15 = reinterpret_tensor(buf10, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf10 triton_poi_fused_clone_7[grid(16, 4)](buf14, buf15, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf16 = reinterpret_tensor(buf14, (16, 4), (4, 1), 0) del buf14 extern_kernels.addmm(primals_12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_12 buf17 = buf1 del buf1 buf18 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_8[grid(16)](buf16, primals_4, buf17, buf18, 16, XBLOCK=16, num_warps=1, num_stages=1) buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_9[grid(64)](buf16, primals_4, buf17, buf18, primals_13, primals_14, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf17 del buf18 del primals_14 buf20 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_16, reinterpret_tensor(buf19, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf20) del primals_16 buf21 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_add_mul_pow_tanh_10[grid(256)](buf20, buf21, 256, XBLOCK=256, num_warps=4, num_stages=1) buf22 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf21, (16, 16), (16, 1), 0), reinterpret_tensor(primals_17, (16, 4), (1, 16), 0), out=buf22) buf23 = reinterpret_tensor(buf22, (4, 4, 4), (16, 4, 1), 0) del buf22 triton_poi_fused_add_11[grid(64)](buf23, primals_18, buf16, primals_4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_18 return buf23, primals_4, primals_13, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(buf9, (4, 1, 4, 4), (16, 16, 4, 1), 0 ), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0 ), buf16, reinterpret_tensor(buf19, (16, 4), (4, 1), 0 ), buf20, reinterpret_tensor(buf21, (16, 16), (16, 1), 0 ), primals_17, primals_15, primals_11, reinterpret_tensor(buf13, ( 16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 4), 0 ), primals_9, primals_7, primals_5 def gelu(x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) def generate_relative_positions_matrix(length, max_relative_positions, cache=False): """Generate the clipped relative positions matrix for a given length and maximum relative positions""" if cache: distance_mat = torch.arange(-length + 1, 1, 1).unsqueeze(0) else: range_vec = torch.arange(length) range_mat = range_vec.unsqueeze(-1).expand(-1, length).transpose(0, 1) distance_mat = range_mat - range_mat.transpose(0, 1) distance_mat_clipped = torch.clamp(distance_mat, min=- max_relative_positions, max=max_relative_positions) final_mat = distance_mat_clipped + max_relative_positions return final_mat def relative_matmul(x, z, transpose): """Helper function for relative positions attention.""" batch_size = x.shape[0] heads = 
x.shape[1] length = x.shape[2] x_t = x.permute(2, 0, 1, 3) x_t_r = x_t.reshape(length, heads * batch_size, -1) if transpose: z_t = z.transpose(1, 2) x_tz_matmul = torch.matmul(x_t_r, z_t) else: x_tz_matmul = torch.matmul(x_t_r, z) x_tz_matmul_r = x_tz_matmul.reshape(length, batch_size, heads, -1) x_tz_matmul_r_t = x_tz_matmul_r.permute(1, 2, 0, 3) return x_tz_matmul_r_t class MLP(nn.Module): def __init__(self, n_embd, n_state, dropout): super(MLP, self).__init__() self.c_fc = nn.Linear(n_embd, n_state) self.c_proj = nn.Linear(n_state, n_embd) self.act = gelu self.dropout_1 = nn.Dropout(dropout) self.dropout_2 = nn.Dropout(dropout) self.reset_parameters() def reset_parameters(self): self.c_fc.weight.data.normal_(std=0.02) self.c_fc.bias.data.zero_() self.c_proj.weight.data.normal_(std=0.02) self.c_proj.bias.data.zero_() def forward(self, x): """ x is input, [T, B, n_state] """ h = self.dropout_1(self.act(self.c_fc(x))) h2 = self.dropout_2(self.c_proj(h)) return h2 class MultiHeadedAttention(nn.Module): """Multi-Head Attention module from "Attention is All You Need" :cite:`DBLP:journals/corr/VaswaniSPUJGKP17`. Similar to standard `dot` attention but uses multiple attention distributions simultaneously to select relevant items. .. mermaid:: graph BT A[key] B[value] C[query] O[output] subgraph Attn D[Attn 1] E[Attn 2] F[Attn N] end A --> D C --> D A --> E C --> E A --> F C --> F D --> O E --> O F --> O B --> O Also includes several additional tricks. Args: head_count (int): number of parallel heads model_dim (int): the dimension of keys/values/queries, must be divisible by head_count dropout (float): dropout parameter """ def __init__(self, head_count, model_dim, dropout=0.1, max_relative_positions=0): assert model_dim % head_count == 0 self.dim_per_head = model_dim // head_count self.model_dim = model_dim super(MultiHeadedAttention, self).__init__() self.head_count = head_count self.linear_keys = nn.Linear(model_dim, head_count * self.dim_per_head) self.linear_values = nn.Linear(model_dim, head_count * self. dim_per_head) self.linear_query = nn.Linear(model_dim, head_count * self.dim_per_head ) self.softmax = nn.Softmax(dim=-1) self.dropout = nn.Dropout(dropout) self.final_linear = nn.Linear(model_dim, model_dim) self.max_relative_positions = max_relative_positions if max_relative_positions > 0: vocab_size = max_relative_positions * 2 + 1 self.relative_positions_embeddings = nn.Embedding(vocab_size, self.dim_per_head) def forward(self, key, value, query, mask=None, layer_cache=None, type=None ): """ Compute the context vector and the attention vectors.
Args: key (FloatTensor): set of `key_len` key vectors ``(batch, key_len, dim)`` value (FloatTensor): set of `key_len` value vectors ``(batch, key_len, dim)`` query (FloatTensor): set of `query_len` query vectors ``(batch, query_len, dim)`` mask: binary mask indicating which keys have non-zero attention ``(batch, query_len, key_len)`` Returns: (FloatTensor, FloatTensor): * output context vectors ``(batch, query_len, dim)`` * one of the attention vectors ``(batch, query_len, key_len)`` """ batch_size = key.size(0) dim_per_head = self.dim_per_head head_count = self.head_count key_len = key.size(1) query_len = query.size(1) def shape(x): """Projection.""" return x.view(batch_size, -1, head_count, dim_per_head).transpose( 1, 2) def unshape(x): """Compute context.""" return x.transpose(1, 2).contiguous().view(batch_size, -1, head_count * dim_per_head) if layer_cache is not None: if type == 'self': query, key, value = self.linear_query(query), self.linear_keys( query), self.linear_values(query) key = shape(key) value = shape(value) if layer_cache['self_keys'] is not None: key = torch.cat((layer_cache['self_keys'], key), dim=2) if layer_cache['self_values'] is not None: value = torch.cat((layer_cache['self_values'], value), dim=2) layer_cache['self_keys'] = key layer_cache['self_values'] = value elif type == 'context': query = self.linear_query(query) if layer_cache['memory_keys'] is None: key, value = self.linear_keys(key), self.linear_values( value) key = shape(key) value = shape(value) else: key, value = layer_cache['memory_keys'], layer_cache[ 'memory_values'] layer_cache['memory_keys'] = key layer_cache['memory_values'] = value else: key = self.linear_keys(key) value = self.linear_values(value) query = self.linear_query(query) key = shape(key) value = shape(value) if self.max_relative_positions > 0 and type == 'self': key_len = key.size(2) relative_positions_matrix = generate_relative_positions_matrix( key_len, self.max_relative_positions, cache=True if layer_cache is not None else False) relations_keys = self.relative_positions_embeddings( relative_positions_matrix) relations_values = self.relative_positions_embeddings( relative_positions_matrix) query = shape(query) key_len = key.size(2) query_len = query.size(2) query = query / math.sqrt(dim_per_head) query_key = torch.matmul(query, key.transpose(2, 3)) if self.max_relative_positions > 0 and type == 'self': scores = query_key + relative_matmul(query, relations_keys, True) else: scores = query_key scores = scores.float() if mask is not None: mask = mask.unsqueeze(1) scores = scores.masked_fill(mask, -1e+18) attn = self.softmax(scores) drop_attn = self.dropout(attn) context_original = torch.matmul(drop_attn, value) if self.max_relative_positions > 0 and type == 'self': context = unshape(context_original + relative_matmul(drop_attn, relations_values, False)) else: context = unshape(context_original) output = self.final_linear(context) top_attn = attn.view(batch_size, head_count, query_len, key_len)[:, 0, :, :].contiguous() return output, top_attn class TransformerGPTEncoderLayerNew(nn.Module): """ A single layer of the transformer encoder. Args: d_model (int): the dimension of keys/values/queries in MultiHeadedAttention, also the input size of the first-layer of the PositionwiseFeedForward. heads (int): the number of heads for MultiHeadedAttention. d_ff (int): the second-layer of the PositionwiseFeedForward. dropout (float): dropout probability (0-1.0).
""" def __init__(self, d_model, heads, d_ff, dropout, attn_dropout, max_relative_positions=0): super(TransformerGPTEncoderLayerNew, self).__init__() self.self_attn = MultiHeadedAttention(heads, d_model, dropout= attn_dropout, max_relative_positions=max_relative_positions) self.feed_forward = MLP(d_model, d_model * 4, dropout) self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-05) self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-05) self.dropout = nn.Dropout(dropout) def forward(self, input_0, input_1): primals_5 = self.self_attn.linear_keys.weight primals_2 = self.self_attn.linear_keys.bias primals_7 = self.self_attn.linear_values.weight primals_3 = self.self_attn.linear_values.bias primals_9 = self.self_attn.linear_query.weight primals_6 = self.self_attn.linear_query.bias primals_11 = self.self_attn.final_linear.weight primals_8 = self.self_attn.final_linear.bias primals_15 = self.feed_forward.c_fc.weight primals_16 = self.feed_forward.c_fc.bias primals_17 = self.feed_forward.c_proj.weight primals_10 = self.feed_forward.c_proj.bias primals_12 = self.layer_norm_1.weight primals_13 = self.layer_norm_1.bias primals_14 = self.layer_norm_2.weight primals_18 = self.layer_norm_2.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return output[0]
fangleai/encoder-agnostic-adaptation
TransformerGPTEncoderLayer
false
15378
[ "MIT" ]
70
d917e654152df202dd35bba49c409c3ecd24eaf7
https://github.com/fangleai/encoder-agnostic-adaptation/tree/d917e654152df202dd35bba49c409c3ecd24eaf7
DiceLoss
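The record below compiles a soft Dice loss into a single persistent reduction kernel. Reading off the Graph fragment that follows, the kernel forms a smoothed Dice coefficient for each of the four slices along the leading dimension (smoothing constant 1 in both numerator and denominator, matching the add(..., 1) nodes) and returns one minus their mean; as a math sketch:

\mathcal{L}_{\mathrm{Dice}} = 1 - \frac{1}{4} \sum_{b=0}^{3} \frac{2 \sum_i x_{b,i} y_{b,i} + 1}{\sum_i x_{b,i} + \sum_i y_{b,i} + 1}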
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/hx/chxqssslfjy3nakpoakgjbi4drv34uniaclaqtbdes42vf4arfbw.py # Topologically Sorted Source Nodes: [product, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, coefficient, batch_loss, product_1, intersection_1, mul_3, add_4, sum_5, sum_6, add_5, add_6, coefficient_1, batch_loss_1, product_2, intersection_2, mul_5, add_7, sum_8, sum_9, add_8, add_9, coefficient_2, batch_loss_2, product_3, intersection_3, mul_7, add_10, sum_11, sum_12, add_11, add_12, coefficient_3, batch_loss_3, batch_loss_4, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_10 => add_12 # add_11 => add_13 # add_12 => add_14 # add_2 => add_2 # add_4 => add_4 # add_5 => add_5 # add_6 => add_6 # add_7 => add_8 # add_8 => add_9 # add_9 => add_10 # batch_loss => add_3 # batch_loss_1 => add_7 # batch_loss_2 => add_11 # batch_loss_3 => add_15 # batch_loss_4 => div_4 # coefficient => div # coefficient_1 => div_1 # coefficient_2 => div_2 # coefficient_3 => div_3 # intersection => sum_1 # intersection_1 => sum_4 # intersection_2 => sum_7 # intersection_3 => sum_10 # mul_1 => mul_1 # mul_3 => mul_3 # mul_5 => mul_5 # mul_7 => mul_7 # product => mul # product_1 => mul_2 # product_2 => mul_4 # product_3 => mul_6 # sub => sub # sum_11 => sum_11 # sum_12 => sum_12 # sum_2 => sum_2 # sum_3 => sum_3 # sum_5 => sum_5 # sum_6 => sum_6 # sum_8 => sum_8 # sum_9 => sum_9 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, %select_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_1,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {}) # %add_2 : 
[num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_2, %select_3), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_2,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_4, 2), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 1), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_2,), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_3,), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_5, %sum_6), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, 1), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_4, %add_6), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %div_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_4, %select_5), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_4,), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_7, 2), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, 1), kwargs = {}) # %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_4,), kwargs = {}) # %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_5,), kwargs = {}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_8, %sum_9), kwargs = {}) # %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_9, 1), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_8, %add_10), kwargs = {}) # %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %div_2), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_6, %select_7), kwargs = {}) # %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_6,), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_10, 2), kwargs = {}) # %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, 1), kwargs = {}) # %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_6,), kwargs = {}) # %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_7,), kwargs = {}) # %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_11, %sum_12), kwargs = {}) # %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_13, 1), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_12, %add_14), kwargs = {}) # %add_15 : 
[num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %div_3), kwargs = {}) # %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_15, 4), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_4), kwargs = {}) triton_per_fused_add_div_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 12, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp12 = tl.load(in_ptr0 + (64 + r0), None) tmp13 = tl.load(in_ptr1 + (64 + r0), None) tmp24 = tl.load(in_ptr0 + (128 + r0), None) tmp25 = tl.load(in_ptr1 + (128 + r0), None) tmp36 = tl.load(in_ptr0 + (192 + r0), None) tmp37 = tl.load(in_ptr1 + (192 + r0), None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp9 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp14 = tmp12 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.sum(tmp15, 1)[:, None] tmp18 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp21 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp23 = tl.sum(tmp21, 1)[:, None] tmp26 = tmp24 * tmp25 tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tmp29 = tl.sum(tmp27, 1)[:, None] tmp30 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp32 = tl.sum(tmp30, 1)[:, None] tmp33 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp35 = tl.sum(tmp33, 1)[:, None] tmp38 = tmp36 * tmp37 tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp41 = tl.sum(tmp39, 1)[:, None] 
tmp42 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp44 = tl.sum(tmp42, 1)[:, None] tmp45 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK]) tmp47 = tl.sum(tmp45, 1)[:, None] tmp48 = 2.0 tmp49 = tmp5 * tmp48 tmp50 = 1.0 tmp51 = tmp49 + tmp50 tmp52 = tmp8 + tmp11 tmp53 = tmp52 + tmp50 tmp54 = tmp51 / tmp53 tmp55 = 0.0 tmp56 = tmp54 + tmp55 tmp57 = tmp17 * tmp48 tmp58 = tmp57 + tmp50 tmp59 = tmp20 + tmp23 tmp60 = tmp59 + tmp50 tmp61 = tmp58 / tmp60 tmp62 = tmp56 + tmp61 tmp63 = tmp29 * tmp48 tmp64 = tmp63 + tmp50 tmp65 = tmp32 + tmp35 tmp66 = tmp65 + tmp50 tmp67 = tmp64 / tmp66 tmp68 = tmp62 + tmp67 tmp69 = tmp41 * tmp48 tmp70 = tmp69 + tmp50 tmp71 = tmp44 + tmp47 tmp72 = tmp71 + tmp50 tmp73 = tmp70 / tmp72 tmp74 = tmp68 + tmp73 tmp75 = 0.25 tmp76 = tmp74 * tmp75 tmp77 = tmp50 - tmp76 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp77, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf12 = buf0; del buf0 # reuse buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [product, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, coefficient, batch_loss, product_1, intersection_1, mul_3, add_4, sum_5, sum_6, add_5, add_6, coefficient_1, batch_loss_1, product_2, intersection_2, mul_5, add_7, sum_8, sum_9, add_8, add_9, coefficient_2, batch_loss_2, product_3, intersection_3, mul_7, add_10, sum_11, sum_12, add_11, add_12, coefficient_3, batch_loss_3, batch_loss_4, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub] stream0 = get_raw_stream(0) triton_per_fused_add_div_mul_rsub_sum_0.run(buf13, arg0_1, arg1_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf13, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class DiceLoss(nn.Module): """Sørensen–Dice coefficient loss to calculate the mean loss over a batch of data. This loss mainly calculates the similarity between two samples. To know more about this loss, check this link: https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient """ def __init__(self): """Simple constructor for the class.""" super(DiceLoss, self).__init__() def forward(self, predicted, target): """ Method for calculation of loss from sample. Parameters: predicted(torch.Tensor): Predicted output of the network. Shape - (Batch Size,Channel,Height,Width) target(torch.Tensor): Actual required output for the network Shape - (Batch Size,Channel,Height,Width) Returns: The mean Dice loss over the batch. """ batch = predicted.size()[0] batch_loss = 0 for index in range(batch): coefficient = self._dice_coefficient(predicted[index], target[ index]) batch_loss += coefficient batch_loss = batch_loss / batch return 1 - batch_loss def _dice_coefficient(self, predicted, target): """Calculates the Sørensen–Dice Coefficient for a single sample. Parameters: predicted(torch.Tensor): Predicted single output of the network. Shape - (Channel,Height,Width) target(torch.Tensor): Actual required single output for the network Shape - (Channel,Height,Width) Returns: coefficient(torch.Tensor): Dice coefficient for the input sample. 1 represents high similarity and 0 represents low similarity. """ smooth = 1 product = torch.mul(predicted, target) intersection = product.sum() coefficient = (2 * intersection + smooth) / (predicted.sum() + target.sum() + smooth) return coefficient def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp12 = tl.load(in_ptr0 + (64 + r0), None) tmp13 = tl.load(in_ptr1 + (64 + r0), None) tmp24 = tl.load(in_ptr0 + (128 + r0), None) tmp25 = tl.load(in_ptr1 + (128 + r0), None) tmp36 = tl.load(in_ptr0 + (192 + r0), None) tmp37 = tl.load(in_ptr1 + (192 + r0), None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp9 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp14 = tmp12 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.sum(tmp15, 1)[:, None] tmp18 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp21 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp23 = tl.sum(tmp21, 1)[:, None] tmp26 = tmp24 * tmp25 tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tmp29 = tl.sum(tmp27, 1)[:, None] tmp30 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp32 = tl.sum(tmp30, 1)[:, None] tmp33 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp35 = tl.sum(tmp33, 1)[:, None] tmp38 = tmp36 * tmp37 tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp41 = tl.sum(tmp39, 1)[:, None] tmp42 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp44 = tl.sum(tmp42, 1)[:, None] tmp45 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK]) tmp47 = tl.sum(tmp45, 1)[:, None] tmp48 = 2.0 tmp49 = tmp5 * tmp48 tmp50 = 1.0 tmp51 = tmp49 + tmp50 tmp52 = tmp8 + tmp11 tmp53 = tmp52 + tmp50 tmp54 = tmp51 / tmp53 tmp55 = 0.0 tmp56 = tmp54 + tmp55 tmp57 = tmp17 * tmp48 tmp58 = tmp57 + tmp50 tmp59 = tmp20 + tmp23 tmp60 = tmp59 + tmp50 tmp61 = tmp58 / tmp60 tmp62 = tmp56 + tmp61 tmp63 = tmp29 * tmp48 tmp64 = tmp63 + tmp50 tmp65 = tmp32 + tmp35 tmp66 = tmp65 + tmp50 tmp67 = tmp64 / tmp66 tmp68 = tmp62 + tmp67 tmp69 = tmp41 * tmp48 tmp70 = tmp69 + tmp50 tmp71 = tmp44 + tmp47 tmp72 = tmp71 + tmp50 tmp73 = tmp70 / tmp72 tmp74 = tmp68 + tmp73 tmp75 = 0.25 tmp76 = tmp74 * tmp75 tmp77 = tmp50 - tmp76 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp77, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf12 = buf0 del buf0 buf13 = buf12 del buf12 get_raw_stream(0) triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf13, arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf13, class DiceLossNew(nn.Module): """Sørensen–Dice coefficient loss to calculate the mean loss over a batch of data.This loss mainly calculates the similarity between two samples. 
To know more about this loss check this link: https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient """ def __init__(self): """Simple constructor for the class.""" super(DiceLossNew, self).__init__() def _dice_coefficient(self, predicted, target): """Calculates the Sørensen–Dice Coefficient for a single sample. Parameters: predicted(torch.Tensor): Predicted single output of the network. Shape - (Channel,Height,Width) target(torch.Tensor): Actual required single output for the network Shape - (Channel,Height,Width) Returns: coefficient(torch.Tensor): Dice coefficient for the input sample. 1 represents high similarity and 0 represents low similarity. """ smooth = 1 product = torch.mul(predicted, target) intersection = product.sum() coefficient = (2 * intersection + smooth) / (predicted.sum() + target.sum() + smooth) return coefficient def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
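A minimal usage sketch for the pair above, assuming a CUDA device and that both class definitions are in scope. The fused kernel hard-codes the traced (4, 4, 4, 4) shape: it reads the four samples at offsets 0, 64, 128 and 192, folds the smooth=1 term in as the tmp50 constant, applies the batch mean as the 0.25 scale (tmp75), and finishes with 1 - mean, so the compiled module should match the eager per-sample loop:

import torch

predicted = torch.rand(4, 4, 4, 4, device='cuda')
target = torch.rand(4, 4, 4, 4, device='cuda')
eager = DiceLoss()(predicted, target)     # Python loop over the batch
fused = DiceLossNew()(predicted, target)  # one triton_per_fused_* launch
assert torch.allclose(eager, fused, atol=1e-6)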
g-freire/Brain-Tumor-Segmentation
DiceLoss
false
15379
[ "MIT" ]
156
e4f258feb64c11815570e295c58bda78afd21ab9
https://github.com/g-freire/Brain-Tumor-Segmentation/tree/e4f258feb64c11815570e295c58bda78afd21ab9
MaxPool2dLayer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/7r/c7rzxjltguesvkazbysb26gqvur5priptu7klxnxqooaclt5izep.py # Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # max_pool2d => getitem # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) % 2 x2 = (xindex // 4) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), xmask) tmp1 
= tl.load(in_ptr0 + (1 + x0 + (4*x1) + (16*x2)), xmask) tmp3 = tl.load(in_ptr0 + (2 + x0 + (4*x1) + (16*x2)), xmask) tmp5 = tl.load(in_ptr0 + (4 + x0 + (4*x1) + (16*x2)), xmask) tmp7 = tl.load(in_ptr0 + (5 + x0 + (4*x1) + (16*x2)), xmask) tmp9 = tl.load(in_ptr0 + (6 + x0 + (4*x1) + (16*x2)), xmask) tmp11 = tl.load(in_ptr0 + (8 + x0 + (4*x1) + (16*x2)), xmask) tmp13 = tl.load(in_ptr0 + (9 + x0 + (4*x1) + (16*x2)), xmask) tmp15 = tl.load(in_ptr0 + (10 + x0 + (4*x1) + (16*x2)), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tl.store(out_ptr0 + (x3), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class MaxPool2dLayer(nn.Module): def forward(self, tensor, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False): return F.max_pool2d(tensor, kernel_size, stride=stride, padding= padding, ceil_mode=ceil_mode) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 % 2 x2 = xindex // 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), xmask) tmp1 = tl.load(in_ptr0 + (1 + x0 + 4 * x1 + 16 * x2), xmask) tmp3 = tl.load(in_ptr0 + (2 + x0 + 4 * x1 + 16 * x2), xmask) tmp5 = tl.load(in_ptr0 + (4 + x0 + 4 * x1 + 16 * x2), xmask) tmp7 = tl.load(in_ptr0 + (5 + x0 + 4 * x1 + 16 * x2), xmask) tmp9 = tl.load(in_ptr0 + (6 + x0 + 4 * x1 + 16 * x2), xmask) tmp11 = tl.load(in_ptr0 + (8 + x0 + 4 * x1 + 16 * x2), xmask) tmp13 = tl.load(in_ptr0 + (9 + x0 + 4 * x1 + 16 * x2), xmask) tmp15 = tl.load(in_ptr0 + (10 + x0 + 4 * x1 + 16 * x2), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tl.store(out_ptr0 + x3, tmp16, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class MaxPool2dLayerNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
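A short usage sketch, assuming CUDA and the classes above. Tracing baked the default arguments into the graph, so MaxPool2dLayerNew only reproduces kernel_size=(3, 3), stride=(1, 1), padding=0 on a (4, 4, 4, 4) input; the nine tl.load offsets in the kernel are the nine taps of that 3x3 window, and a 3x3 window sliding with stride 1 over a 4x4 map yields the (4, 4, 2, 2) output:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4, device='cuda')
ref = F.max_pool2d(x, (3, 3), stride=(1, 1), padding=0)
out = MaxPool2dLayerNew()(x)
assert out.shape == (4, 4, 2, 2)
assert torch.equal(out, ref)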
fuzhanrahmanian/lucent
MaxPool2dLayer
false
15380
[ "Apache-2.0" ]
449
13b24c3c37784185275da73c7a11095b2ae809c5
https://github.com/fuzhanrahmanian/lucent/tree/13b24c3c37784185275da73c7a11095b2ae809c5
CosineBasisLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/vm/cvmxb4fm3754gb5xbvn445attxbnlnqmgxmnql4g235hqqjxgk4t.py # Topologically Sorted Source Nodes: [arange, i_pi, mul_1, embedding], Original ATen: [aten.arange, aten.mul, aten.cos] # Source node to ATen node mapping: # arange => add, convert_element_type, iota, mul # embedding => cos # i_pi => mul_1 # mul_1 => mul_2 # Graph fragment: # %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota, 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 3.141592653589793), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %mul_1), kwargs = {}) # %cos : [num_users=1] = call_function[target=torch.ops.aten.cos.default](args = (%mul_2,), kwargs = {}) triton_poi_fused_arange_cos_mul_0 = async_compile.triton('triton_poi_fused_arange_cos_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_cos_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 
'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_arange_cos_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = 1 + x0 tmp2 = tmp1.to(tl.float32) tmp3 = 3.141592653589793 tmp4 = tmp2 * tmp3 tmp5 = tmp0 * tmp4 tmp6 = tl_math.cos(tmp5) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [arange, i_pi, mul_1, embedding], Original ATen: [aten.arange, aten.mul, aten.cos] stream0 = get_raw_stream(0) triton_poi_fused_arange_cos_mul_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0) del primals_1 buf1 = empty_strided_cuda((256, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (256, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 return (reinterpret_tensor(buf1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), reinterpret_tensor(buf0, (256, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np from torch import nn def cosine_basis_functions(x, n_basis_functions=64): """Cosine basis functions used to embed quantile thresholds. Args: x (torch.Tensor): Input. n_basis_functions (int): Number of cosine basis functions. Returns: ndarray: Embedding with shape of (x.shape + (n_basis_functions,)). """ i_pi = torch.arange(1, n_basis_functions + 1, dtype=torch.float, device =x.device) * np.pi embedding = torch.cos(x[..., None] * i_pi) assert embedding.shape == x.shape + (n_basis_functions,) return embedding class CosineBasisLinear(nn.Module): """Linear layer following cosine basis functions. Args: n_basis_functions (int): Number of cosine basis functions. out_size (int): Output size. """ def __init__(self, n_basis_functions, out_size): super().__init__() self.linear = nn.Linear(n_basis_functions, out_size) self.n_basis_functions = n_basis_functions self.out_size = out_size def forward(self, x): """Evaluate. Args: x (torch.Tensor): Input. Returns: torch.Tensor: Output with shape of (x.shape + (out_size,)). """ h = cosine_basis_functions(x, self.n_basis_functions) h = h.reshape(-1, self.n_basis_functions) out = self.linear(h) out = out.reshape(*x.shape, self.out_size) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_basis_functions': 4, 'out_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_arange_cos_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = 1 + x0 tmp2 = tmp1.to(tl.float32) tmp3 = 3.141592653589793 tmp4 = tmp2 * tmp3 tmp5 = tmp0 * tmp4 tmp6 = tl_math.cos(tmp5) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_arange_cos_mul_0[grid(1024)](primals_1, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((256, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (256, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 return reinterpret_tensor(buf1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0 ), reinterpret_tensor(buf0, (256, 4), (4, 1), 0) def cosine_basis_functions(x, n_basis_functions=64): """Cosine basis functions used to embed quantile thresholds. Args: x (torch.Tensor): Input. n_basis_functions (int): Number of cosine basis functions. Returns: ndarray: Embedding with shape of (x.shape + (n_basis_functions,)). """ i_pi = torch.arange(1, n_basis_functions + 1, dtype=torch.float, device =x.device) * np.pi embedding = torch.cos(x[..., None] * i_pi) assert embedding.shape == x.shape + (n_basis_functions,) return embedding class CosineBasisLinearNew(nn.Module): """Linear layer following cosine basis functions. Args: n_basis_functions (int): Number of cosine basis functions. out_size (int): Output size. """ def __init__(self, n_basis_functions, out_size): super().__init__() self.linear = nn.Linear(n_basis_functions, out_size) self.n_basis_functions = n_basis_functions self.out_size = out_size def forward(self, input_0): primals_2 = self.linear.weight primals_3 = self.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
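A small sketch of the embedding itself, runnable on CPU and assuming the cosine_basis_functions definition above. Despite the docstring's "ndarray", the function returns a torch.Tensor whose last axis holds cos(x * i * pi) for i = 1..n_basis_functions; this is exactly what triton_poi_fused_arange_cos_mul_0 computes (tmp1 = 1 + x0 is the basis index i, tmp3 is pi) before extern_kernels.addmm applies the linear layer:

import torch

x = torch.tensor([0.0, 0.5, 1.0])
emb = cosine_basis_functions(x, n_basis_functions=4)
assert emb.shape == (3, 4)
# basis index i=2 at x=0.5: cos(0.5 * 2 * pi) = cos(pi) = -1
assert torch.isclose(emb[1, 1], torch.tensor(-1.0))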
g-votte/pfrl
CosineBasisLinear
false
15381
[ "MIT" ]
824
4c30c1d73f0941a2b649b62937eec346bb55a95e
https://github.com/g-votte/pfrl/tree/4c30c1d73f0941a2b649b62937eec346bb55a95e
FCLateActionSAQFunction
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/lg/clgg7lidrsfgaud7ua7lulq4csdjjfhoa5iu6ldcldqvnqeoprch.py # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # h_1 => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %primals_4], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & 
xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 8, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tl.load(in_ptr2 + ((4*x1) + ((-4) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/p5/cp5tfqfwrfwxlkkeq5u27dwcgidea534dshg3prbpaz2f7xapt3e.py # Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # h => relu # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 
), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 8), (8, 1)) assert_size_stride(primals_6, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf0, primals_3, primals_4, buf1, 32, grid=grid(32), stream=stream0) del primals_4 buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, buf1, reinterpret_tensor(primals_5, (8, 1), (1, 8), 0), alpha=1, beta=1, out=buf3) del primals_6 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf0, primals_3, buf4, 16, grid=grid(16), stream=stream0) del buf0 del primals_3 return (buf3, primals_1, buf1, primals_5, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np from torch import nn from abc import ABCMeta from abc import abstractmethod import torch.nn.functional as F def init_lecun_normal(tensor, scale=1.0): """Initializes the tensor with LeCunNormal.""" fan_in = torch.nn.init._calculate_correct_fan(tensor, 'fan_in') std = scale * np.sqrt(1.0 / fan_in) with torch.no_grad(): return tensor.normal_(0, std) @torch.no_grad() def init_chainer_default(layer): """Initializes the layer with the chainer default. weights with LeCunNormal(scale=1.0) and zeros as biases """ assert isinstance(layer, nn.Module) if isinstance(layer, (nn.Linear, nn.Conv2d)): init_lecun_normal(layer.weight) if layer.bias is not None: nn.init.zeros_(layer.bias) return layer class MLP(nn.Module): """Multi-Layer Perceptron""" def __init__(self, in_size, out_size, hidden_sizes, nonlinearity=F.relu, last_wscale=1): self.in_size = in_size self.out_size = out_size self.hidden_sizes = hidden_sizes self.nonlinearity = nonlinearity super().__init__() if hidden_sizes: self.hidden_layers = nn.ModuleList() self.hidden_layers.append(nn.Linear(in_size, hidden_sizes[0])) for hin, hout in zip(hidden_sizes, hidden_sizes[1:]): self.hidden_layers.append(nn.Linear(hin, hout)) self.hidden_layers.apply(init_chainer_default) self.output = nn.Linear(hidden_sizes[-1], out_size) else: self.output = nn.Linear(in_size, out_size) init_lecun_normal(self.output.weight, scale=last_wscale) nn.init.zeros_(self.output.bias) def forward(self, x): h = x if self.hidden_sizes: for l in self.hidden_layers: h = self.nonlinearity(l(h)) return self.output(h) class StateActionQFunction(object, metaclass=ABCMeta): """Abstract Q-function with state and action input.""" @abstractmethod def __call__(self, x, a): """Evaluates Q-function Args: x (ndarray): state input a (ndarray): action input Returns: Q-value for state x and action a """ raise NotImplementedError() class FCLateActionSAQFunction(nn.Module, StateActionQFunction): """Fully-connected (s,a)-input Q-function with late action input. Actions are not included until the second hidden layer and not normalized. This architecture is used in the DDPG paper: http://arxiv.org/abs/1509.02971 Args: n_dim_obs (int): Number of dimensions of observation space. n_dim_action (int): Number of dimensions of action space. n_hidden_channels (int): Number of hidden channels. n_hidden_layers (int): Number of hidden layers. It must be greater than or equal to 1. nonlinearity (callable): Nonlinearity between layers. It must accept a Variable as an argument and return a Variable with the same shape. Nonlinearities with learnable parameters such as PReLU are not supported. last_wscale (float): Scale of weight initialization of the last layer. 
""" def __init__(self, n_dim_obs, n_dim_action, n_hidden_channels, n_hidden_layers, nonlinearity=F.relu, last_wscale=1.0): assert n_hidden_layers >= 1 self.n_input_channels = n_dim_obs + n_dim_action self.n_hidden_layers = n_hidden_layers self.n_hidden_channels = n_hidden_channels self.nonlinearity = nonlinearity super().__init__() self.obs_mlp = MLP(in_size=n_dim_obs, out_size=n_hidden_channels, hidden_sizes=[]) self.mlp = MLP(in_size=n_hidden_channels + n_dim_action, out_size=1, hidden_sizes=[self.n_hidden_channels] * (self.n_hidden_layers - 1), nonlinearity=nonlinearity, last_wscale=last_wscale) self.output = self.mlp.output def forward(self, state, action): h = self.nonlinearity(self.obs_mlp(state)) h = torch.cat((h, action), dim=1) return self.mlp(h) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'n_dim_obs': 4, 'n_dim_action': 4, 'n_hidden_channels': 4, 'n_hidden_layers': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np from torch import nn from abc import ABCMeta from abc import abstractmethod import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp15 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 8), (8, 1)) assert_size_stride(primals_6, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](buf0, primals_3, primals_4, buf1, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_6, buf1, reinterpret_tensor(primals_5, (8, 1), (1, 8), 0), alpha=1, beta=1, out=buf3) del primals_6 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(16)](buf0, primals_3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 del primals_3 return buf3, primals_1, buf1, primals_5, buf4 def init_lecun_normal(tensor, scale=1.0): """Initializes the tensor with LeCunNormal.""" fan_in = torch.nn.init._calculate_correct_fan(tensor, 'fan_in') std = scale * np.sqrt(1.0 / fan_in) with torch.no_grad(): return tensor.normal_(0, std) @torch.no_grad() def 
init_chainer_default(layer): """Initializes the layer with the chainer default. weights with LeCunNormal(scale=1.0) and zeros as biases """ assert isinstance(layer, nn.Module) if isinstance(layer, (nn.Linear, nn.Conv2d)): init_lecun_normal(layer.weight) if layer.bias is not None: nn.init.zeros_(layer.bias) return layer class MLP(nn.Module): """Multi-Layer Perceptron""" def __init__(self, in_size, out_size, hidden_sizes, nonlinearity=F.relu, last_wscale=1): self.in_size = in_size self.out_size = out_size self.hidden_sizes = hidden_sizes self.nonlinearity = nonlinearity super().__init__() if hidden_sizes: self.hidden_layers = nn.ModuleList() self.hidden_layers.append(nn.Linear(in_size, hidden_sizes[0])) for hin, hout in zip(hidden_sizes, hidden_sizes[1:]): self.hidden_layers.append(nn.Linear(hin, hout)) self.hidden_layers.apply(init_chainer_default) self.output = nn.Linear(hidden_sizes[-1], out_size) else: self.output = nn.Linear(in_size, out_size) init_lecun_normal(self.output.weight, scale=last_wscale) nn.init.zeros_(self.output.bias) def forward(self, x): h = x if self.hidden_sizes: for l in self.hidden_layers: h = self.nonlinearity(l(h)) return self.output(h) class StateActionQFunction(object, metaclass=ABCMeta): """Abstract Q-function with state and action input.""" @abstractmethod def __call__(self, x, a): """Evaluates Q-function Args: x (ndarray): state input a (ndarray): action input Returns: Q-value for state x and action a """ raise NotImplementedError() class FCLateActionSAQFunctionNew(nn.Module, StateActionQFunction): """Fully-connected (s,a)-input Q-function with late action input. Actions are not included until the second hidden layer and not normalized. This architecture is used in the DDPG paper: http://arxiv.org/abs/1509.02971 Args: n_dim_obs (int): Number of dimensions of observation space. n_dim_action (int): Number of dimensions of action space. n_hidden_channels (int): Number of hidden channels. n_hidden_layers (int): Number of hidden layers. It must be greater than or equal to 1. nonlinearity (callable): Nonlinearity between layers. It must accept a Variable as an argument and return a Variable with the same shape. Nonlinearities with learnable parameters such as PReLU are not supported. last_wscale (float): Scale of weight initialization of the last layer. """ def __init__(self, n_dim_obs, n_dim_action, n_hidden_channels, n_hidden_layers, nonlinearity=F.relu, last_wscale=1.0): assert n_hidden_layers >= 1 self.n_input_channels = n_dim_obs + n_dim_action self.n_hidden_layers = n_hidden_layers self.n_hidden_channels = n_hidden_channels self.nonlinearity = nonlinearity super().__init__() self.obs_mlp = MLP(in_size=n_dim_obs, out_size=n_hidden_channels, hidden_sizes=[]) self.mlp = MLP(in_size=n_hidden_channels + n_dim_action, out_size=1, hidden_sizes=[self.n_hidden_channels] * (self.n_hidden_layers - 1), nonlinearity=nonlinearity, last_wscale=last_wscale) self.output = self.mlp.output def forward(self, input_0, input_1): primals_1 = self.obs_mlp.output.weight primals_3 = self.obs_mlp.output.bias primals_5 = self.mlp.output.weight primals_6 = self.mlp.output.bias primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
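A minimal equivalence sketch, assuming a CUDA device and both class definitions above. With n_hidden_layers=1 the traced graph reduces to relu(state @ W_obs^T + b_obs) concatenated with the action (the ReLU is fused into triton_poi_fused_cat_0) followed by a single addmm, so after copying weights the compiled module should agree with the eager one:

import torch

eager = FCLateActionSAQFunction(4, 4, 4, 1).cuda()
fused = FCLateActionSAQFunctionNew(4, 4, 4, 1).cuda()
fused.load_state_dict(eager.state_dict())
obs = torch.rand(4, 4, device='cuda')
act = torch.rand(4, 4, device='cuda')
assert torch.allclose(eager(obs, act), fused(obs, act), atol=1e-5)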
g-votte/pfrl
FCLateActionSAQFunction
false
15382
[ "MIT" ]
824
4c30c1d73f0941a2b649b62937eec346bb55a95e
https://github.com/g-votte/pfrl/tree/4c30c1d73f0941a2b649b62937eec346bb55a95e
BertAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/dk/cdk4odz276xorciau5ehgl7f3s2mgkf3hrye6xep6kzubczdeqqy.py # Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.clone] # Source node to ATen node mapping: # attention_scores => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, 
None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/wj/cwjlsgmqskxtouagjcds7x5qo2d5aepifdzyctdz7np37qgu52x3.py # Topologically Sorted Source Nodes: [attention_scores_1, attention_scores_2, attention_scores_3, attention_probs], Original ATen: [aten.div, aten.clamp, aten.add, aten._softmax] # Source node to ATen node mapping: # attention_probs => amax, exp, sub, sum_1 # attention_scores_1 => div # attention_scores_2 => clamp_max, clamp_min # attention_scores_3 => add # Graph fragment: # %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%div, -10000.0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 10000.0), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%clamp_max, %primals_8), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_add_clamp_div_1 = async_compile.triton('triton_poi_fused__softmax_add_clamp_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_clamp_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_clamp_div_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex 
x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = -10000.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 10000.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp8 = tmp6 + tmp7 tmp10 = tmp9 * tmp1 tmp11 = triton_helpers.maximum(tmp10, tmp3) tmp12 = triton_helpers.minimum(tmp11, tmp5) tmp14 = tmp12 + tmp13 tmp15 = triton_helpers.maximum(tmp8, tmp14) tmp17 = tmp16 * tmp1 tmp18 = triton_helpers.maximum(tmp17, tmp3) tmp19 = triton_helpers.minimum(tmp18, tmp5) tmp21 = tmp19 + tmp20 tmp22 = triton_helpers.maximum(tmp15, tmp21) tmp24 = tmp23 * tmp1 tmp25 = triton_helpers.maximum(tmp24, tmp3) tmp26 = triton_helpers.minimum(tmp25, tmp5) tmp28 = tmp26 + tmp27 tmp29 = triton_helpers.maximum(tmp22, tmp28) tmp30 = tmp8 - tmp29 tmp31 = tl_math.exp(tmp30) tmp32 = tmp14 - tmp29 tmp33 = tl_math.exp(tmp32) tmp34 = tmp31 + tmp33 tmp35 = tmp21 - tmp29 tmp36 = tl_math.exp(tmp35) tmp37 = tmp34 + tmp36 tmp38 = tmp28 - tmp29 tmp39 = tl_math.exp(tmp38) tmp40 = tmp37 + tmp39 tl.store(out_ptr0 + (x2), tmp29, xmask) tl.store(out_ptr1 + (x2), tmp40, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ti/cti6y47xlsa5dnd5gbq6d5lcxcx5inq36elbcfsspqkgced4xgyn.py # Topologically Sorted Source Nodes: [attention_scores_1, attention_scores_2, attention_scores_3, attention_probs], Original ATen: [aten.div, aten.clamp, aten.add, aten._softmax, aten.ge, aten.le, aten.logical_and] # Source node to ATen node mapping: # attention_probs => amax, div_1, exp, sub # attention_scores_1 => div # attention_scores_2 => clamp_max, clamp_min # attention_scores_3 => add # Graph fragment: # %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%div, -10000.0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 10000.0), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%clamp_max, %primals_8), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%div, -10000.0), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%div, 10000.0), kwargs = {}) # %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %le), kwargs = {}) triton_poi_fused__softmax_add_clamp_div_ge_le_logical_and_2 = 
async_compile.triton('triton_poi_fused__softmax_add_clamp_div_ge_le_logical_and_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_clamp_div_ge_le_logical_and_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_clamp_div_ge_le_logical_and_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x5 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp7 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + (x5), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = -10000.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 10000.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp8 = tmp6 + tmp7 tmp10 = tmp8 - tmp9 tmp11 = tl_math.exp(tmp10) tmp13 = tmp11 / tmp12 tmp14 = tmp2 >= tmp3 tmp15 = tmp2 <= tmp5 tmp16 = tmp14 & tmp15 tl.store(out_ptr0 + (x3), tmp13, xmask) tl.store(out_ptr1 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xt/cxtkkmujo4ytg6ycpz5lk5livtstr63pg5nsf5ijewjbtrfrqx6k.py # Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # context_layer_1 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 
'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ow/cowvjylgtx4gqrpyivecujnxamvwun5m6ue5cfwykoe2cgsm5pmp.py # Topologically Sorted Source Nodes: [add_1, u, sub, pow_1, s], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow] # Source node to ATen node mapping: # add_1 => add_1 # pow_1 => pow_1 # s => mean_1 # sub => sub_1 # u => mean # Graph fragment: # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_1, [-1], True), kwargs = {}) # %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %mean), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {}) triton_poi_fused_add_mean_pow_sub_4 = async_compile.triton('triton_poi_fused_add_mean_pow_sub_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_pow_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_pow_sub_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ai/caipfk2erpwmcdcz3nupnay52ivkphhvned5iifefb24ymnvwlwk.py # Topologically Sorted Source Nodes: [add_1, u, sub, add_2, sqrt, x_3, mul, hidden_states_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul] # Source node to ATen node mapping: # add_1 => add_1 # add_2 => add_2 # hidden_states_2 => add_3 # mul => mul # sqrt => sqrt # sub => sub_1 # u => mean # x_3 => div_2 # Graph fragment: # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_1, [-1], True), kwargs = {}) # %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %mean), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-12), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_2,), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %sqrt), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_11, %div_2), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_12), kwargs = {}) triton_poi_fused_add_div_mean_mul_sqrt_sub_5 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_sub_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import 
libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_sub_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x2), xmask) tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = 1e-12 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = tmp5 / tmp9 tmp11 = tmp0 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), 
torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [attention_scores_1, attention_scores_2, attention_scores_3, attention_probs], Original ATen: [aten.div, aten.clamp, aten.add, aten._softmax] triton_poi_fused__softmax_add_clamp_div_1.run(buf5, primals_8, buf6, buf7, 64, grid=grid(64), stream=stream0) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [attention_scores_1, attention_scores_2, attention_scores_3, attention_probs], Original ATen: [aten.div, aten.clamp, aten.add, aten._softmax, aten.ge, aten.le, aten.logical_and] triton_poi_fused__softmax_add_clamp_div_ge_le_logical_and_2.run(buf5, primals_8, buf6, buf7, buf8, buf16, 256, grid=grid(256), stream=stream0) del buf5 del primals_8 buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [context_layer], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf2, primals_7, buf9, 16, 4, grid=grid(16, 4), stream=stream0) del primals_7 buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [context_layer], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12) del primals_10 buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [add_1, u, sub, pow_1, s], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow] triton_poi_fused_add_mean_pow_sub_4.run(buf12, primals_3, buf13, buf14, 16, grid=grid(16), stream=stream0) buf15 = 
empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add_1, u, sub, add_2, sqrt, x_3, mul, hidden_states_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul] triton_poi_fused_add_div_mean_mul_sqrt_sub_5.run(primals_11, buf12, primals_3, buf13, buf14, primals_12, buf15, 64, grid=grid(64), stream=stream0) del buf13 del buf14 del primals_12 return (buf15, primals_3, primals_11, buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, primals_9, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), buf16, reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import math import torch from torch import nn class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = torch.clamp(attention_scores, -10000.0, 10000.0) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. 
all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config): super(BertAttention, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_tensor, attention_mask): self_output = self.self(input_tensor, attention_mask) attention_output = self.output(self_output, input_tensor) return attention_output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5)}]
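A minimal usage sketch for the eager reference module above. types.SimpleNamespace stands in for _mock_config here (an assumption; the _paritybench_helpers import builds the same attribute-style config), and the shapes mirror get_inputs()/get_init_inputs():

from types import SimpleNamespace

import torch

config = SimpleNamespace(hidden_size=4, num_attention_heads=4,
                         attention_probs_dropout_prob=0.5,
                         hidden_dropout_prob=0.5)
attn = BertAttention(config).eval()   # eval() makes both Dropout layers no-ops
hidden_states = torch.rand(4, 4, 4)   # (batch, seq_len, hidden_size)
attention_mask = torch.rand(4, 4, 4)  # broadcasts against the (batch, heads, seq, seq) scores
with torch.no_grad():
    out = attn(hidden_states, attention_mask)
print(out.shape)  # torch.Size([4, 4, 4]), same shape as the input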
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_add_clamp_div_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp27 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = -10000.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 10000.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp8 = tmp6 + tmp7 tmp10 = tmp9 * tmp1 tmp11 = triton_helpers.maximum(tmp10, tmp3) tmp12 = triton_helpers.minimum(tmp11, tmp5) tmp14 = tmp12 + tmp13 tmp15 = triton_helpers.maximum(tmp8, tmp14) tmp17 = tmp16 * tmp1 tmp18 = triton_helpers.maximum(tmp17, tmp3) tmp19 = triton_helpers.minimum(tmp18, tmp5) tmp21 = tmp19 + tmp20 tmp22 = triton_helpers.maximum(tmp15, tmp21) tmp24 = tmp23 * tmp1 tmp25 = triton_helpers.maximum(tmp24, tmp3) tmp26 = triton_helpers.minimum(tmp25, tmp5) tmp28 = tmp26 + tmp27 tmp29 = triton_helpers.maximum(tmp22, tmp28) tmp30 = tmp8 - tmp29 tmp31 = tl_math.exp(tmp30) tmp32 = tmp14 - tmp29 tmp33 = tl_math.exp(tmp32) tmp34 = tmp31 + tmp33 tmp35 = tmp21 - tmp29 tmp36 = tl_math.exp(tmp35) tmp37 = tmp34 + tmp36 tmp38 = tmp28 - tmp29 tmp39 = tl_math.exp(tmp38) tmp40 = tmp37 + tmp39 tl.store(out_ptr0 + x2, tmp29, xmask) tl.store(out_ptr1 + x2, tmp40, xmask) @triton.jit def triton_poi_fused__softmax_add_clamp_div_ge_le_logical_and_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x5 = xindex // 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp7 = tl.load(in_ptr1 + x4, 
xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = -10000.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 10000.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp8 = tmp6 + tmp7 tmp10 = tmp8 - tmp9 tmp11 = tl_math.exp(tmp10) tmp13 = tmp11 / tmp12 tmp14 = tmp2 >= tmp3 tmp15 = tmp2 <= tmp5 tmp16 = tmp14 & tmp15 tl.store(out_ptr0 + x3, tmp13, xmask) tl.store(out_ptr1 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_mean_pow_sub_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_sub_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x2, xmask) tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = 1e-12 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = tmp5 / tmp9 tmp11 = tmp0 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) 
assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_clamp_div_1[grid(64)](buf5, primals_8, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused__softmax_add_clamp_div_ge_le_logical_and_2[grid(256)]( buf5, primals_8, buf6, buf7, buf8, buf16, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del primals_8 buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf9, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_3[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0) del buf10 extern_kernels.addmm(primals_10, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12) del primals_10 buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_mean_pow_sub_4[grid(16)](buf12, primals_3, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) 
triton_poi_fused_add_div_mean_mul_sqrt_sub_5[grid(64)](primals_11, buf12, primals_3, buf13, buf14, primals_12, buf15, 64, XBLOCK= 64, num_warps=1, num_stages=1) del buf13 del buf14 del primals_12 return buf15, primals_3, primals_11, buf8, reinterpret_tensor(buf11, ( 16, 4), (4, 1), 0), buf12, primals_9, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), buf16, reinterpret_tensor(buf3, (16, 1, 4), ( 4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = torch.clamp(attention_scores, -10000.0, 10000.0) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. 
all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttentionNew(nn.Module): def __init__(self, config): super(BertAttentionNew, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_0, input_1): primals_1 = self.self.query.weight primals_2 = self.self.query.bias primals_4 = self.self.key.weight primals_5 = self.self.key.bias primals_6 = self.self.value.weight primals_7 = self.self.value.bias primals_9 = self.output.dense.weight primals_10 = self.output.dense.bias primals_11 = self.output.LayerNorm.weight primals_12 = self.output.LayerNorm.bias primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
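A parity sketch for the compiled wrapper (an assumption-laden check: it needs a CUDA device, since call() pins all buffers to cuda:0). The traced graph above contains no dropout nodes, so the eager module is put in eval() mode for the comparison; BertAttentionNew has the same submodule names as BertAttention, so load_state_dict maps one-to-one, and the two outputs should agree to float32 tolerance:

from types import SimpleNamespace

import torch

config = SimpleNamespace(hidden_size=4, num_attention_heads=4,
                         attention_probs_dropout_prob=0.5,
                         hidden_dropout_prob=0.5)
eager = BertAttention(config).cuda().eval()
compiled = BertAttentionNew(config).cuda()
compiled.load_state_dict(eager.state_dict())  # identical parameter names, maps 1:1
x = torch.rand(4, 4, 4, device='cuda')
mask = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(compiled(x, mask), eager(x, mask))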
BIT-ENGD/eeqa
BertAttention
false
15383
[ "MIT" ]
142
2995abbaff1fb47131246a247ee7ed62aa94f4c3
https://github.com/BIT-ENGD/eeqa/tree/2995abbaff1fb47131246a247ee7ed62aa94f4c3
FocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/tz/ctzy7ve4z3i7lqshzeg47cdndeohawhnn4shd5m4djcnph2bosgc.py # Topologically Sorted Source Nodes: [prob, sub_2, pos_weight, mul_2, clamp, abs_1, neg, exp, add, log, sub, clamp_1, mul, pos_log_sig, mul_3, sub_3, neg_weight, mul_4, neg_1, clamp_2, abs_2, neg_2, exp_1, add_2, log_1, sub_1, clamp_3, mul_1, neg_log_sig, mul_5, add_4, loss, mul_6, sub_4, mul_7, avg_weight, mean, loss_1, mean_1], Original ATen: [aten.sigmoid, aten.rsub, aten.pow, aten.mul, aten.clamp, aten.abs, aten.neg, aten.exp, aten.add, aten.log, aten.sub, aten.mean, aten.div] # Source node to ATen node mapping: # abs_1 => abs_1 # abs_2 => abs_2 # add => add # add_2 => add_2 # add_4 => add_4 # avg_weight => add_5 # clamp => clamp_max # clamp_1 => clamp_max_1, clamp_min # clamp_2 => clamp_max_2 # clamp_3 => clamp_max_3, clamp_min_1 # exp => exp # exp_1 => exp_1 # log => log # log_1 => log_1 # loss => neg_3 # loss_1 => div # mean => mean # mean_1 => mean_1 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # mul_6 => mul_6 # mul_7 => mul_7 # neg => neg # neg_1 => neg_1 # neg_2 => neg_2 # neg_log_sig => add_3 # neg_weight => pow_2 # pos_log_sig => add_1 # pos_weight => pow_1 # prob => sigmoid # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # sub_3 => sub_3 # sub_4 => sub_4 # Graph fragment: # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %pow_1), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%arg0_1, 0), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %add : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%exp, 1), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_max, %log), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 0), kwargs = {}) # %clamp_max_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max_1, 0.5), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %mul), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %add_1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sigmoid, 2), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %pow_2), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {}) # %clamp_max_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%neg_1, 0), kwargs = {}) # %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {}) # %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_2,), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_2,), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_1, 1), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_2,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_max_2, %log_1), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 0), kwargs = {}) # %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max_3, 0.5), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, %mul_1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %add_3), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_5), kwargs = {}) # %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%add_4,), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %pow_1), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %pow_2), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %mul_7), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_5,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = 
(%neg_3, %mean), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {}) triton_per_fused_abs_add_clamp_div_exp_log_mean_mul_neg_pow_rsub_sigmoid_sub_0 = async_compile.triton('triton_per_fused_abs_add_clamp_div_exp_log_mean_mul_neg_pow_rsub_sigmoid_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_clamp_div_exp_log_mean_mul_neg_pow_rsub_sigmoid_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_add_clamp_div_exp_log_mean_mul_neg_pow_rsub_sigmoid_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tl.sigmoid(tmp1) tmp3 = 1.0 tmp4 = tmp3 - tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp0 * tmp5 tmp7 = 0.0 tmp8 = triton_helpers.minimum(tmp1, tmp7) tmp9 = tl_math.abs(tmp1) tmp10 = -tmp9 tmp11 = tl_math.exp(tmp10) tmp12 = tmp11 + tmp3 tmp13 = tl_math.log(tmp12) tmp14 = tmp8 - tmp13 tmp15 = triton_helpers.maximum(tmp1, tmp7) tmp16 = triton_helpers.minimum(tmp15, tmp7) tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp19 = tmp14 + tmp18 tmp20 = tmp6 * tmp19 tmp21 = tmp3 - tmp0 tmp22 = tmp2 * tmp2 tmp23 = tmp21 * tmp22 tmp24 = -tmp1 tmp25 = triton_helpers.minimum(tmp24, tmp7) tmp26 = tmp25 - tmp13 tmp27 = tmp26 + tmp18 tmp28 = tmp23 * tmp27 tmp29 = tmp20 + tmp28 tmp30 = -tmp29 tmp31 = tmp6 + tmp23 tmp32 = tl.broadcast_to(tmp31, [RBLOCK]) tmp34 = triton_helpers.promote_to_tensor(tl.sum(tmp32, 0)) tmp35 = 256.0 tmp36 = tmp34 / tmp35 tmp37 = tmp30 / tmp36 tmp38 = tl.broadcast_to(tmp37, [RBLOCK]) tmp40 = triton_helpers.promote_to_tensor(tl.sum(tmp38, 0)) tmp41 = tmp40 / tmp35 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp41, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), 
(64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [prob, sub_2, pos_weight, mul_2, clamp, abs_1, neg, exp, add, log, sub, clamp_1, mul, pos_log_sig, mul_3, sub_3, neg_weight, mul_4, neg_1, clamp_2, abs_2, neg_2, exp_1, add_2, log_1, sub_1, clamp_3, mul_1, neg_log_sig, mul_5, add_4, loss, mul_6, sub_4, mul_7, avg_weight, mean, loss_1, mean_1], Original ATen: [aten.sigmoid, aten.rsub, aten.pow, aten.mul, aten.clamp, aten.abs, aten.neg, aten.exp, aten.add, aten.log, aten.sub, aten.mean, aten.div] stream0 = get_raw_stream(0) triton_per_fused_abs_add_clamp_div_exp_log_mean_mul_neg_pow_rsub_sigmoid_sub_0.run(buf3, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn def log_minus_sigmoid(x): return torch.clamp(-x, max=0) - torch.log(1 + torch.exp(-torch.abs(x)) ) + 0.5 * torch.clamp(x, min=0, max=0) def log_sigmoid(x): return torch.clamp(x, max=0) - torch.log(1 + torch.exp(-torch.abs(x)) ) + 0.5 * torch.clamp(x, min=0, max=0) class FocalLoss(nn.Module): def __init__(self, gamma=2): super(FocalLoss, self).__init__() self.gamma = gamma def forward(self, input, target): pos_log_sig = log_sigmoid(input) neg_log_sig = log_minus_sigmoid(input) prob = torch.sigmoid(input) pos_weight = torch.pow(1 - prob, self.gamma) neg_weight = torch.pow(prob, self.gamma) loss = -(target * pos_weight * pos_log_sig + (1 - target) * neg_weight * neg_log_sig) avg_weight = target * pos_weight + (1 - target) * neg_weight loss /= avg_weight.mean() return loss.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
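The two helpers above are the standard numerically stable forms of log sigmoid(x) and log(1 - sigmoid(x)); the trailing 0.5 * torch.clamp(x, min=0, max=0) term is identically zero and survives only as dead arithmetic in the fused kernel (tmp15 through tmp18). A quick check against torch.nn.functional.logsigmoid, as a sketch assuming the helpers above are in scope:

import torch
import torch.nn.functional as F

# Sanity check: log_sigmoid(x) == log(sigmoid(x)) and
# log_minus_sigmoid(x) == log(1 - sigmoid(x)) == logsigmoid(-x).
x = torch.linspace(-30.0, 30.0, steps=601)
torch.testing.assert_close(log_sigmoid(x), F.logsigmoid(x))
torch.testing.assert_close(log_minus_sigmoid(x), F.logsigmoid(-x))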
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_clamp_div_exp_log_mean_mul_neg_pow_rsub_sigmoid_sub_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tl.sigmoid(tmp1) tmp3 = 1.0 tmp4 = tmp3 - tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp0 * tmp5 tmp7 = 0.0 tmp8 = triton_helpers.minimum(tmp1, tmp7) tmp9 = tl_math.abs(tmp1) tmp10 = -tmp9 tmp11 = tl_math.exp(tmp10) tmp12 = tmp11 + tmp3 tmp13 = tl_math.log(tmp12) tmp14 = tmp8 - tmp13 tmp15 = triton_helpers.maximum(tmp1, tmp7) tmp16 = triton_helpers.minimum(tmp15, tmp7) tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp19 = tmp14 + tmp18 tmp20 = tmp6 * tmp19 tmp21 = tmp3 - tmp0 tmp22 = tmp2 * tmp2 tmp23 = tmp21 * tmp22 tmp24 = -tmp1 tmp25 = triton_helpers.minimum(tmp24, tmp7) tmp26 = tmp25 - tmp13 tmp27 = tmp26 + tmp18 tmp28 = tmp23 * tmp27 tmp29 = tmp20 + tmp28 tmp30 = -tmp29 tmp31 = tmp6 + tmp23 tmp32 = tl.broadcast_to(tmp31, [RBLOCK]) tmp34 = triton_helpers.promote_to_tensor(tl.sum(tmp32, 0)) tmp35 = 256.0 tmp36 = tmp34 / tmp35 tmp37 = tmp30 / tmp36 tmp38 = tl.broadcast_to(tmp37, [RBLOCK]) tmp40 = triton_helpers.promote_to_tensor(tl.sum(tmp38, 0)) tmp41 = tmp40 / tmp35 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp41, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 buf3 = buf2 del buf2 get_raw_stream(0) triton_per_fused_abs_add_clamp_div_exp_log_mean_mul_neg_pow_rsub_sigmoid_sub_0[ grid(1)](buf3, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf3, def log_minus_sigmoid(x): return torch.clamp(-x, max=0) - torch.log(1 + torch.exp(-torch.abs(x)) ) + 0.5 * torch.clamp(x, min=0, max=0) def log_sigmoid(x): return torch.clamp(x, max=0) - torch.log(1 + torch.exp(-torch.abs(x)) ) + 0.5 * torch.clamp(x, min=0, max=0) class FocalLossNew(nn.Module): def __init__(self, gamma=2): super(FocalLossNew, self).__init__() self.gamma = gamma def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
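A minimal parity check between the eager module and the compiled wrapper; this is a sketch that assumes a CUDA device and the FocalLoss, FocalLossNew, and get_inputs definitions above. From the kernel call, arg0_1 carries the logits (it is the tensor passed through sigmoid) and arg1_1 the targets, matching the eager argument order:

import torch

if torch.cuda.is_available():
    # Same (logits, target) pair through both paths; the fused kernel
    # should reproduce the eager scalar loss up to float32 rounding.
    logits, target = (t.cuda() for t in get_inputs())
    eager = FocalLoss()(logits, target)
    fused = FocalLossNew()(logits, target)
    torch.testing.assert_close(eager, fused, rtol=1e-4, atol=1e-5)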
gabrielsluz/vince
FocalLoss
false
15384
[ "Apache-2.0" ]
61
f4e17a2cf70c080a7e01e46d15537e33224c869b
https://github.com/gabrielsluz/vince/tree/f4e17a2cf70c080a7e01e46d15537e33224c869b
PPO
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/md/cmd3ewacyhu5w5hausgbjbmtnt5rr66cgczh4ibdypq7dz6p4v7g.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = 
xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py # Topologically Sorted Source Nodes: [policy], Original ATen: [aten._softmax] # Source node to ATen node mapping: # policy => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py # Topologically Sorted Source Nodes: [policy], Original ATen: [aten._softmax] # Source node to ATen node mapping: # policy => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = 
call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (1, 128), (128, 1)) assert_size_stride(primals_7, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse buf7 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, 
primals_2, buf7, 8192, grid=grid(8192), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [policy], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [policy], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0) del buf3 buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [value], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf6) del primals_7 return (buf4, reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), buf4, primals_6, primals_4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
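The two _softmax kernels above implement the usual numerically stable two-pass softmax: the first subtracts the per-row maximum and exponentiates, the second normalizes by the row sum. The same decomposition in plain PyTorch, as a reading aid:

import torch

x = torch.rand(64, 4)
e = torch.exp(x - x.amax(dim=-1, keepdim=True))  # triton_poi_fused__softmax_1
p = e / e.sum(dim=-1, keepdim=True)              # triton_poi_fused__softmax_2
torch.testing.assert_close(p, torch.softmax(x, dim=-1))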
import random import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class BatchMaker: def __init__(self, states, actions, returns, advantages, old_policies): self.states = states self.actions = actions self.returns = returns self.advantages = advantages self.old_policies = old_policies def sample(self): sample_indexes = random.sample(range(len(self.states)), batch_size) states_sample = self.states[sample_indexes] actions_sample = self.actions[sample_indexes] retruns_sample = self.returns[sample_indexes] advantages_sample = self.advantages[sample_indexes] old_policies_sample = self.old_policies[sample_indexes] return (states_sample, actions_sample, retruns_sample, advantages_sample, old_policies_sample) class PPO(nn.Module): def __init__(self, num_inputs, num_outputs): super(PPO, self).__init__() self.t = 0 self.num_inputs = num_inputs self.num_outputs = num_outputs self.fc = nn.Linear(num_inputs, 128) self.fc_actor = nn.Linear(128, num_outputs) self.fc_critic = nn.Linear(128, 1) for m in self.modules(): if isinstance(m, nn.Linear): nn.init.xavier_uniform(m.weight) def forward(self, input): x = torch.relu(self.fc(input)) policy = F.softmax(self.fc_actor(x), dim=-1) value = self.fc_critic(x) return policy, value @classmethod def get_gae(self, values, rewards, masks): returns = torch.zeros_like(rewards) advantages = torch.zeros_like(rewards) running_return = 0 previous_value = 0 running_advantage = 0 for t in reversed(range(len(rewards))): running_return = rewards[t] + gamma * running_return * masks[t] running_tderror = rewards[t] + gamma * previous_value * masks[t ] - values.data[t] running_advantage = (running_tderror + gamma * lambda_gae * running_advantage * masks[t]) returns[t] = running_return previous_value = values.data[t] advantages[t] = running_advantage return returns, advantages @classmethod def train_model(cls, net, transitions, optimizer): states, actions, rewards, masks = (transitions.state, transitions. action, transitions.reward, transitions.mask) states = torch.stack(states) actions = torch.stack(actions) rewards = torch.Tensor(rewards) masks = torch.Tensor(masks) old_policies, old_values = net(states) old_policies = old_policies.view(-1, net.num_outputs).detach() returns, advantages = net.get_gae(old_values.view(-1).detach(), rewards, masks) batch_maker = BatchMaker(states, actions, returns, advantages, old_policies) for _ in range(epoch_k): for _ in range(len(states) // batch_size): (states_sample, actions_sample, returns_sample, advantages_sample, old_policies_sample ) = batch_maker.sample() policies, values = net(states_sample) values = values.view(-1) policies = policies.view(-1, net.num_outputs) ratios = (policies / old_policies_sample * actions_sample. detach()).sum(dim=1) clipped_ratios = torch.clamp(ratios, min=1.0 - epsilon_clip, max=1.0 + epsilon_clip) actor_loss = -torch.min(ratios * advantages_sample, clipped_ratios * advantages_sample).sum() critic_loss = (returns_sample.detach() - values).pow(2).sum() policy_entropy = (torch.log(policies) * policies).sum(1, keepdim=True).mean() loss = (actor_loss + ciritic_coefficient * critic_loss - entropy_coefficient * policy_entropy) optimizer.zero_grad() loss.backward() optimizer.step() return loss def get_action(self, input): policy, _ = self.forward(input) policy = policy[0].data.numpy() action = np.random.choice(self.num_outputs, 1, p=policy)[0] return action def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_inputs': 4, 'num_outputs': 4}]
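train_model, get_gae, and BatchMaker.sample reference module-level hyperparameters (gamma, lambda_gae, batch_size, epoch_k, epsilon_clip, ciritic_coefficient, entropy_coefficient) that are defined elsewhere in the source repository. Hypothetical values that make the snippet above self-contained; the names, including the misspelled ciritic_coefficient, match the code exactly as it uses them:

# Hypothetical hyperparameters (assumed values, not taken from the repo).
gamma = 0.99                # discount factor used in get_gae
lambda_gae = 0.95           # GAE smoothing parameter
batch_size = 64             # minibatch size drawn by BatchMaker.sample
epoch_k = 4                 # optimization epochs per collected batch
epsilon_clip = 0.2          # PPO ratio clipping range
ciritic_coefficient = 0.5   # critic loss weight (sic: spelling as used above)
entropy_coefficient = 0.01  # entropy bonus weight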
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import random import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 128), (128, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) get_raw_stream(0) 
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1, primals_2, buf7, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf6) del primals_7 return buf4, reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 128), (128, 1), 0 ), buf4, primals_6, primals_4, buf7 class BatchMaker: def __init__(self, states, actions, returns, advantages, old_policies): self.states = states self.actions = actions self.returns = returns self.advantages = advantages self.old_policies = old_policies def sample(self): sample_indexes = random.sample(range(len(self.states)), batch_size) states_sample = self.states[sample_indexes] actions_sample = self.actions[sample_indexes] retruns_sample = self.returns[sample_indexes] advantages_sample = self.advantages[sample_indexes] old_policies_sample = self.old_policies[sample_indexes] return (states_sample, actions_sample, retruns_sample, advantages_sample, old_policies_sample) class PPONew(nn.Module): def __init__(self, num_inputs, num_outputs): super(PPONew, self).__init__() self.t = 0 self.num_inputs = num_inputs self.num_outputs = num_outputs self.fc = nn.Linear(num_inputs, 128) self.fc_actor = nn.Linear(128, num_outputs) self.fc_critic = nn.Linear(128, 1) for m in self.modules(): if isinstance(m, nn.Linear): nn.init.xavier_uniform(m.weight) @classmethod def get_gae(self, values, rewards, masks): returns = torch.zeros_like(rewards) advantages = torch.zeros_like(rewards) running_return = 0 previous_value = 0 running_advantage = 0 for t in reversed(range(len(rewards))): running_return = rewards[t] + gamma * running_return * masks[t] running_tderror = rewards[t] + gamma * previous_value * masks[t ] - values.data[t] running_advantage = (running_tderror + gamma * lambda_gae * running_advantage * masks[t]) returns[t] = running_return previous_value = values.data[t] advantages[t] = running_advantage return returns, advantages @classmethod def train_model(cls, net, transitions, optimizer): states, actions, rewards, masks = (transitions.state, transitions. 
action, transitions.reward, transitions.mask) states = torch.stack(states) actions = torch.stack(actions) rewards = torch.Tensor(rewards) masks = torch.Tensor(masks) old_policies, old_values = net(states) old_policies = old_policies.view(-1, net.num_outputs).detach() returns, advantages = net.get_gae(old_values.view(-1).detach(), rewards, masks) batch_maker = BatchMaker(states, actions, returns, advantages, old_policies) for _ in range(epoch_k): for _ in range(len(states) // batch_size): (states_sample, actions_sample, returns_sample, advantages_sample, old_policies_sample ) = batch_maker.sample() policies, values = net(states_sample) values = values.view(-1) policies = policies.view(-1, net.num_outputs) ratios = (policies / old_policies_sample * actions_sample. detach()).sum(dim=1) clipped_ratios = torch.clamp(ratios, min=1.0 - epsilon_clip, max=1.0 + epsilon_clip) actor_loss = -torch.min(ratios * advantages_sample, clipped_ratios * advantages_sample).sum() critic_loss = (returns_sample.detach() - values).pow(2).sum() policy_entropy = (torch.log(policies) * policies).sum(1, keepdim=True).mean() loss = (actor_loss + ciritic_coefficient * critic_loss - entropy_coefficient * policy_entropy) optimizer.zero_grad() loss.backward() optimizer.step() return loss def get_action(self, input): policy, _ = self.forward(input) policy = policy[0].data.numpy() action = np.random.choice(self.num_outputs, 1, p=policy)[0] return action def forward(self, input_0): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_4 = self.fc_actor.weight primals_5 = self.fc_actor.bias primals_6 = self.fc_critic.weight primals_7 = self.fc_critic.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
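A weight-sharing parity check between PPO and PPONew, as a sketch assuming a CUDA device. The two classes declare identical submodules (fc, fc_actor, fc_critic), so load_state_dict transfers cleanly:

import torch

if torch.cuda.is_available():
    eager = PPO(4, 4).cuda()
    fused = PPONew(4, 4).cuda()
    fused.load_state_dict(eager.state_dict())  # align the random inits
    x = torch.rand(4, 4, 4, 4, device='cuda')
    policy_e, value_e = eager(x)
    policy_f, value_f = fused(x)
    torch.testing.assert_close(policy_e, policy_f, rtol=1e-4, atol=1e-5)
    torch.testing.assert_close(value_e, value_f, rtol=1e-4, atol=1e-5)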
g6ling/Pytorch-Cartpole
PPO
false
15385
[ "MIT" ]
116
ecb7b622cfefe825ac95388cceb6752413d90a2a
https://github.com/g6ling/Pytorch-Cartpole/tree/ecb7b622cfefe825ac95388cceb6752413d90a2a
BCEDiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/qg/cqgruwq7hcawuuvrunirvgwvwwr7rfwjjzzm7xj5trpvzol42ylj.py # Topologically Sorted Source Nodes: [binary_cross_entropy], Original ATen: [aten.binary_cross_entropy] # Source node to ATen node mapping: # binary_cross_entropy => full_default, full_default_1, log, log1p, maximum, maximum_1, mean, mul, mul_1, neg, sub, sub_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 1), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %maximum), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg1_1,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %maximum_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_1,), kwargs = {}) triton_per_fused_binary_cross_entropy_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_binary_cross_entropy_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = -tmp3 tmp5 = libdevice.log1p(tmp4) tmp6 = -100.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp2 * tmp7 tmp9 = tl_math.log(tmp3) tmp10 = triton_helpers.maximum(tmp9, tmp6) tmp11 = tmp0 * tmp10 tmp12 = tmp8 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ln/clntl2hql2cgrmqfbu4rklgkakwrcoehwedcnzouxkf42pfepmzu.py # Topologically Sorted Source Nodes: [binary_cross_entropy, product, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, coefficient, batch_loss, product_1, intersection_1, mul_3, add_4, sum_5, sum_6, add_5, add_6, coefficient_1, batch_loss_1, product_2, intersection_2, mul_5, add_7, sum_8, sum_9, add_8, add_9, coefficient_2, batch_loss_2, product_3, intersection_3, mul_7, add_10, sum_11, sum_12, add_11, add_12, coefficient_3, batch_loss_3, batch_loss_4, sub, add_13], Original ATen: [aten.binary_cross_entropy, aten.mul, aten.sum, aten.add, aten.div, aten.rsub] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_10 => add_12 # add_11 => add_13 # add_12 => add_14 # add_13 => add_16 # add_2 => add_2 # add_4 => add_4 # add_5 => add_5 # add_6 => add_6 # add_7 => add_8 # add_8 => add_9 # add_9 => add_10 # batch_loss => add_3 # batch_loss_1 => add_7 # batch_loss_2 => add_11 # batch_loss_3 => add_15 # batch_loss_4 => div_4 # binary_cross_entropy => full_default, full_default_1, log, log1p, maximum, maximum_1, mean, mul, mul_1, neg, sub, sub_1 # coefficient => div # coefficient_1 => div_1 # coefficient_2 => div_2 # coefficient_3 => div_3 # intersection => sum_1 # intersection_1 => sum_4 # intersection_2 => sum_7 # intersection_3 => sum_10 # mul_1 => mul_3 # mul_3 => mul_5 # mul_5 => mul_7 # mul_7 => 
mul_9 # product => mul_2 # product_1 => mul_4 # product_2 => mul_6 # product_3 => mul_8 # sub => sub_2 # sum_11 => sum_11 # sum_12 => sum_12 # sum_2 => sum_2 # sum_3 => sum_3 # sum_5 => sum_5 # sum_6 => sum_6 # sum_8 => sum_8 # sum_9 => sum_9 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 1), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %maximum), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg1_1,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %maximum_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_1,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, %select_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_2,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_1,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_2, %select_3), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_4,), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_4, 2), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, 1), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_2,), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_3,), kwargs = {}) # %add_5 : 
[num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_5, %sum_6), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, 1), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_4, %add_6), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %div_1), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_4, %select_5), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_6,), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_7, 2), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, 1), kwargs = {}) # %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_4,), kwargs = {}) # %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_5,), kwargs = {}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_8, %sum_9), kwargs = {}) # %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_9, 1), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_8, %add_10), kwargs = {}) # %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %div_2), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_6, %select_7), kwargs = {}) # %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_8,), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_10, 2), kwargs = {}) # %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_9, 1), kwargs = {}) # %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_6,), kwargs = {}) # %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_7,), kwargs = {}) # %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_11, %sum_12), kwargs = {}) # %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_13, 1), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_12, %add_14), kwargs = {}) # %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %div_3), kwargs = {}) # %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_15, 4), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_4), kwargs = {}) # %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %sub_2), kwargs = {}) triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_1 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_1', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 12, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_1(in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp12 = tl.load(in_ptr0 + (64 + r0), None) tmp13 = tl.load(in_ptr1 + (64 + r0), None) tmp24 = tl.load(in_ptr0 + (128 + r0), None) tmp25 = tl.load(in_ptr1 + (128 + r0), None) tmp36 = tl.load(in_ptr0 + (192 + r0), None) tmp37 = tl.load(in_ptr1 + (192 + r0), None) tmp75 = tl.load(in_out_ptr1 + (0)) tmp76 = tl.broadcast_to(tmp75, [XBLOCK, 1]) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp9 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp14 = tmp12 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.sum(tmp15, 1)[:, None] tmp18 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp21 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp23 = tl.sum(tmp21, 1)[:, None] tmp26 = tmp24 * tmp25 tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tmp29 = tl.sum(tmp27, 1)[:, None] tmp30 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp32 = tl.sum(tmp30, 1)[:, None] tmp33 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp35 = tl.sum(tmp33, 1)[:, None] tmp38 = tmp36 * tmp37 tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp41 = tl.sum(tmp39, 1)[:, None] tmp42 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp44 = tl.sum(tmp42, 1)[:, None] tmp45 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK]) tmp47 = tl.sum(tmp45, 1)[:, None] tmp48 = 2.0 tmp49 = tmp5 * tmp48 tmp50 = 1.0 tmp51 = tmp49 + tmp50 tmp52 = tmp8 + tmp11 tmp53 = tmp52 + tmp50 tmp54 = tmp51 / tmp53 tmp55 = 0.0 tmp56 = tmp54 + tmp55 tmp57 = tmp17 * tmp48 tmp58 = tmp57 + tmp50 tmp59 = tmp20 + tmp23 tmp60 = tmp59 + tmp50 tmp61 = tmp58 / tmp60 tmp62 = tmp56 + tmp61 tmp63 = tmp29 * tmp48 tmp64 = tmp63 + tmp50 tmp65 = tmp32 + tmp35 tmp66 = tmp65 + tmp50 tmp67 = tmp64 / tmp66 tmp68 = tmp62 + tmp67 tmp69 = tmp41 * tmp48 tmp70 = tmp69 + tmp50 tmp71 = tmp44 + tmp47 tmp72 = 
tmp71 + tmp50 tmp73 = tmp70 / tmp72 tmp74 = tmp68 + tmp73 tmp77 = 256.0 tmp78 = tmp76 / tmp77 tmp79 = 0.25 tmp80 = tmp74 * tmp79 tmp81 = tmp50 - tmp80 tmp82 = tmp78 + tmp81 tl.debug_barrier() tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp82, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [binary_cross_entropy], Original ATen: [aten.binary_cross_entropy] stream0 = get_raw_stream(0) triton_per_fused_binary_cross_entropy_0.run(arg0_1, arg1_1, buf0, 1, 256, grid=grid(1), stream=stream0) buf14 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [binary_cross_entropy, product, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, coefficient, batch_loss, product_1, intersection_1, mul_3, add_4, sum_5, sum_6, add_5, add_6, coefficient_1, batch_loss_1, product_2, intersection_2, mul_5, add_7, sum_8, sum_9, add_8, add_9, coefficient_2, batch_loss_2, product_3, intersection_3, mul_7, add_10, sum_11, sum_12, add_11, add_12, coefficient_3, batch_loss_3, batch_loss_4, sub, add_13], Original ATen: [aten.binary_cross_entropy, aten.mul, aten.sum, aten.add, aten.div, aten.rsub] triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_1.run(buf14, arg1_1, arg0_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf14, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class DiceLoss(nn.Module): """Sørensen–Dice coefficient loss to calculate the mean loss over a batch of data.This loss mainly calculates the similarity between two samples. To know more about this loss check this link: https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient """ def __init__(self): """Simple constructor for the class.""" super(DiceLoss, self).__init__() def forward(self, predicted, target): """ Method for calculation of loss from sample. Parameters: predicted(torch.Tensor): Predicted output of the network. Shape - (Batch Size,Channel,Height,Width) target(torch.Tensor): Actual required output for the network Shape - (Batch Size,Channel,Height,Width) Returns: The mean dice Loss over the batch size. """ batch = predicted.size()[0] batch_loss = 0 for index in range(batch): coefficient = self._dice_coefficient(predicted[index], target[ index]) batch_loss += coefficient batch_loss = batch_loss / batch return 1 - batch_loss def _dice_coefficient(self, predicted, target): """Calculates the Sørensen–Dice Coefficient for a single sample. Parameters: predicted(torch.Tensor): Predicted single output of the network. Shape - (Channel,Height,Width) target(torch.Tensor): Actual required single output for the network Shape - (Channel,Height,Width) Returns: coefficient(torch.Tensor): Dice coefficient for the input sample. 1 represents high similarity and 0 represents low similarity. """ smooth = 1 product = torch.mul(predicted, target) intersection = product.sum() coefficient = (2 * intersection + smooth) / (predicted.sum() + target.sum() + smooth) return coefficient class BCEDiceLoss(nn.Module): """ Combination of Binary Cross Entropy Loss and Soft Dice Loss. This combined loss is used to train the network so that both benefits of the loss are leveraged. """ def __init__(self, device): """Simple constructor for the class.""" super(BCEDiceLoss, self).__init__() self.dice_loss = DiceLoss() def forward(self, predicted, target): """ Method for calculation of combined loss from sample.""" return F.binary_cross_entropy(predicted, target) + self.dice_loss( predicted, target) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'device': 0}]
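The per-sample Python loop in DiceLoss.forward is equivalent to a vectorized batch computation, which is what the fused kernel below exploits. A sketch of that equivalence with the same smooth = 1 constant, assuming the classes above are in scope:

import torch

pred, tgt = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
inter = (pred * tgt).flatten(1).sum(1)  # per-sample intersection
coeff = (2 * inter + 1) / (pred.flatten(1).sum(1) + tgt.flatten(1).sum(1) + 1)
torch.testing.assert_close(1 - coeff.mean(), DiceLoss()(pred, tgt))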
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = -tmp3 tmp5 = libdevice.log1p(tmp4) tmp6 = -100.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp2 * tmp7 tmp9 = tl_math.log(tmp3) tmp10 = triton_helpers.maximum(tmp9, tmp6) tmp11 = tmp0 * tmp10 tmp12 = tmp8 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None) @triton.jit def triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_1(in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp12 = tl.load(in_ptr0 + (64 + r0), None) tmp13 = tl.load(in_ptr1 + (64 + r0), None) tmp24 = tl.load(in_ptr0 + (128 + r0), None) tmp25 = tl.load(in_ptr1 + (128 + r0), None) tmp36 = tl.load(in_ptr0 + (192 + r0), None) tmp37 = tl.load(in_ptr1 + (192 + r0), None) tmp75 = tl.load(in_out_ptr1 + 0) tmp76 = tl.broadcast_to(tmp75, [XBLOCK, 1]) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp9 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp14 = tmp12 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.sum(tmp15, 1)[:, None] tmp18 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp21 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp23 = tl.sum(tmp21, 1)[:, None] tmp26 = tmp24 * tmp25 tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tmp29 = tl.sum(tmp27, 1)[:, None] tmp30 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp32 = tl.sum(tmp30, 1)[:, None] tmp33 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp35 = tl.sum(tmp33, 1)[:, None] tmp38 = tmp36 * tmp37 tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp41 = tl.sum(tmp39, 1)[:, None] tmp42 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp44 = tl.sum(tmp42, 1)[:, None] tmp45 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK]) tmp47 = tl.sum(tmp45, 1)[:, None] tmp48 = 2.0 tmp49 = tmp5 * tmp48 tmp50 = 1.0 tmp51 = tmp49 + tmp50 tmp52 = tmp8 + tmp11 tmp53 = tmp52 + tmp50 tmp54 = tmp51 / tmp53 tmp55 = 0.0 tmp56 = tmp54 + tmp55 tmp57 = tmp17 * tmp48 tmp58 = tmp57 + tmp50 tmp59 = tmp20 + tmp23 tmp60 = tmp59 + tmp50 tmp61 = tmp58 / tmp60 tmp62 = tmp56 + tmp61 tmp63 = tmp29 * tmp48 tmp64 = tmp63 + tmp50 tmp65 = tmp32 + tmp35 tmp66 = tmp65 + tmp50 tmp67 = tmp64 / tmp66 tmp68 
= tmp62 + tmp67 tmp69 = tmp41 * tmp48 tmp70 = tmp69 + tmp50 tmp71 = tmp44 + tmp47 tmp72 = tmp71 + tmp50 tmp73 = tmp70 / tmp72 tmp74 = tmp68 + tmp73 tmp77 = 256.0 tmp78 = tmp76 / tmp77 tmp79 = 0.25 tmp80 = tmp74 * tmp79 tmp81 = tmp50 - tmp80 tmp82 = tmp78 + tmp81 tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp82, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_binary_cross_entropy_0[grid(1)](arg0_1, arg1_1, buf0, 1, 256, num_warps=2, num_stages=1) buf14 = buf0 del buf0 triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_1[grid(1)]( buf14, arg1_1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf14, class DiceLoss(nn.Module): """Sørensen–Dice coefficient loss to calculate the mean loss over a batch of data.This loss mainly calculates the similarity between two samples. To know more about this loss check this link: https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient """ def __init__(self): """Simple constructor for the class.""" super(DiceLoss, self).__init__() def forward(self, predicted, target): """ Method for calculation of loss from sample. Parameters: predicted(torch.Tensor): Predicted output of the network. Shape - (Batch Size,Channel,Height,Width) target(torch.Tensor): Actual required output for the network Shape - (Batch Size,Channel,Height,Width) Returns: The mean dice Loss over the batch size. """ batch = predicted.size()[0] batch_loss = 0 for index in range(batch): coefficient = self._dice_coefficient(predicted[index], target[ index]) batch_loss += coefficient batch_loss = batch_loss / batch return 1 - batch_loss def _dice_coefficient(self, predicted, target): """Calculates the Sørensen–Dice Coefficient for a single sample. Parameters: predicted(torch.Tensor): Predicted single output of the network. Shape - (Channel,Height,Width) target(torch.Tensor): Actual required single output for the network Shape - (Channel,Height,Width) Returns: coefficient(torch.Tensor): Dice coefficient for the input sample. 1 represents high similarity and 0 represents low similarity. """ smooth = 1 product = torch.mul(predicted, target) intersection = product.sum() coefficient = (2 * intersection + smooth) / (predicted.sum() + target.sum() + smooth) return coefficient class BCEDiceLossNew(nn.Module): """ Combination of Binary Cross Entropy Loss and Soft Dice Loss. This combined loss is used to train the network so that both benefits of the loss are leveraged. """ def __init__(self, device): """Simple constructor for the class.""" super(BCEDiceLossNew, self).__init__() self.dice_loss = DiceLoss() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
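A smoke-test sketch for the fused wrapper, assuming a CUDA device and the definitions above. binary_cross_entropy needs probabilities in (0, 1), which the torch.rand inputs from get_inputs satisfy; argument roles follow the traced graph (in the first kernel, the second pointer is the tensor that gets log'd):

import torch

if torch.cuda.is_available():
    a, b = (t.cuda() for t in get_inputs())
    loss = BCEDiceLossNew(device=0)(a, b)
    print(loss.item())  # scalar: mean BCE plus (1 - mean Dice coefficient)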
g-freire/Brain-Tumor-Segmentation
BCEDiceLoss
false
15,386
[ "MIT" ]
156
e4f258feb64c11815570e295c58bda78afd21ab9
https://github.com/g-freire/Brain-Tumor-Segmentation/tree/e4f258feb64c11815570e295c58bda78afd21ab9
TNPG
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/hr/chrb4easzia4dw753qymwrkwdvp5554y3k2pc6zt3dtjbxgoihzj.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, None) ''', 
device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py # Topologically Sorted Source Nodes: [policy], Original ATen: [aten._softmax] # Source node to ATen node mapping: # policy => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py # Topologically Sorted Source Nodes: [policy], Original ATen: [aten._softmax] # Source node to ATen node mapping: # policy => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as 
tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_2, 8192, grid=grid(8192), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [policy], Original ATen: 
[aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [policy], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0) del buf3 return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf4, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
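The pair triton_poi_fused__softmax_1 / triton_poi_fused__softmax_2 above is the standard two-pass numerically stable softmax over dim 1 of the (4, 4, 4, 4) logits: the first kernel subtracts the per-slice maximum before exponentiating (so exp never overflows), the second divides by the per-slice sum of exponentials. An equivalent eager sketch:

import torch

def softmax_two_pass(x, dim=1):
    shifted = x - x.amax(dim=dim, keepdim=True)  # pass 1: max-subtract, then exp
    e = shifted.exp()
    return e / e.sum(dim=dim, keepdim=True)      # pass 2: normalize by the sum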
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F def flat_grad(grads): grad_flatten = [] for grad in grads: grad_flatten.append(grad.view(-1)) grad_flatten = torch.cat(grad_flatten) return grad_flatten def flat_hessian(hessians): hessians_flatten = [] for hessian in hessians: hessians_flatten.append(hessian.contiguous().view(-1)) hessians_flatten = torch.cat(hessians_flatten).data return hessians_flatten def kl_divergence(policy, old_policy): kl = old_policy * torch.log(old_policy / policy) kl = kl.sum(1, keepdim=True) return kl def fisher_vector_product(net, states, p, cg_damp=0.1): policy = net(states) old_policy = net(states).detach() kl = kl_divergence(policy, old_policy) kl = kl.mean() kl_grad = torch.autograd.grad(kl, net.parameters(), create_graph=True) kl_grad = flat_grad(kl_grad) kl_grad_p = (kl_grad * p.detach()).sum() kl_hessian_p = torch.autograd.grad(kl_grad_p, net.parameters()) kl_hessian_p = flat_hessian(kl_hessian_p) return kl_hessian_p + cg_damp * p.detach() def conjugate_gradient(net, states, loss_grad, n_step=10, residual_tol=1e-10): x = torch.zeros(loss_grad.size()) r = loss_grad.clone() p = loss_grad.clone() r_dot_r = torch.dot(r, r) for i in range(n_step): A_dot_p = fisher_vector_product(net, states, p) alpha = r_dot_r / torch.dot(p, A_dot_p) x += alpha * p r -= alpha * A_dot_p new_r_dot_r = torch.dot(r, r) betta = new_r_dot_r / r_dot_r p = r + betta * p r_dot_r = new_r_dot_r if r_dot_r < residual_tol: break return x def flat_params(model): params = [] for param in model.parameters(): params.append(param.data.view(-1)) params_flatten = torch.cat(params) return params_flatten def update_model(model, new_params): index = 0 for params in model.parameters(): params_length = len(params.view(-1)) new_param = new_params[index:index + params_length] new_param = new_param.view(params.size()) params.data.copy_(new_param) index += params_length class TNPG(nn.Module): def __init__(self, num_inputs, num_outputs): super(TNPG, self).__init__() self.t = 0 self.num_inputs = num_inputs self.num_outputs = num_outputs self.fc_1 = nn.Linear(num_inputs, 128) self.fc_2 = nn.Linear(128, num_outputs) for m in self.modules(): if isinstance(m, nn.Linear): nn.init.xavier_uniform(m.weight) def forward(self, input): x = torch.tanh(self.fc_1(input)) policy = F.softmax(self.fc_2(x)) return policy @classmethod def train_model(cls, net, transitions): states, actions, rewards, masks = (transitions.state, transitions. action, transitions.reward, transitions.mask) states = torch.stack(states) actions = torch.stack(actions) rewards = torch.Tensor(rewards) masks = torch.Tensor(masks) returns = torch.zeros_like(rewards) running_return = 0 for t in reversed(range(len(rewards))): running_return = rewards[t] + gamma * running_return * masks[t] returns[t] = running_return policies = net(states) policies = policies.view(-1, net.num_outputs) policy_actions = (policies * actions.detach()).sum(dim=1) loss = (policy_actions * returns).mean() loss_grad = torch.autograd.grad(loss, net.parameters()) loss_grad = flat_grad(loss_grad) step_dir = conjugate_gradient(net, states, loss_grad.data) params = flat_params(net) new_params = params + lr * step_dir update_model(net, new_params) return -loss def get_action(self, input): policy = self.forward(input) policy = policy[0].data.numpy() action = np.random.choice(self.num_outputs, 1, p=policy)[0] return action def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_inputs': 4, 'num_outputs': 4}]
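Two notes on the reference implementation above. First, train_model reads gamma and lr, which are never defined in this listing; in the source repository they are presumably module-level hyperparameters, so the method would raise NameError if run exactly as shown. Second, conjugate_gradient solves the damped Fisher system F x = g using only Fisher-vector products from fisher_vector_product, so the Fisher matrix itself is never materialized. A self-contained sanity check of the same CG update rule against a small explicit SPD matrix (test values are mine, not from the repo):

import torch

def cg_solve(matvec, b, n_step=10, residual_tol=1e-10):
    # Identical update rule to conjugate_gradient above, with an explicit matvec.
    x = torch.zeros_like(b)
    r = b.clone()
    p = b.clone()
    r_dot_r = torch.dot(r, r)
    for _ in range(n_step):
        a_p = matvec(p)
        alpha = r_dot_r / torch.dot(p, a_p)
        x = x + alpha * p
        r = r - alpha * a_p
        new_r_dot_r = torch.dot(r, r)
        p = r + (new_r_dot_r / r_dot_r) * p
        r_dot_r = new_r_dot_r
        if r_dot_r < residual_tol:
            break
    return x

A = torch.tensor([[3.0, 1.0], [1.0, 2.0]])  # symmetric positive definite
b = torch.tensor([1.0, 4.0])
x = cg_solve(lambda v: A @ v, b)
assert torch.allclose(A @ x, b, atol=1e-5)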
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(8192)](buf1, primals_2, 8192, XBLOCK= 128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), 
reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf4, primals_4 def flat_grad(grads): grad_flatten = [] for grad in grads: grad_flatten.append(grad.view(-1)) grad_flatten = torch.cat(grad_flatten) return grad_flatten def flat_hessian(hessians): hessians_flatten = [] for hessian in hessians: hessians_flatten.append(hessian.contiguous().view(-1)) hessians_flatten = torch.cat(hessians_flatten).data return hessians_flatten def kl_divergence(policy, old_policy): kl = old_policy * torch.log(old_policy / policy) kl = kl.sum(1, keepdim=True) return kl def fisher_vector_product(net, states, p, cg_damp=0.1): policy = net(states) old_policy = net(states).detach() kl = kl_divergence(policy, old_policy) kl = kl.mean() kl_grad = torch.autograd.grad(kl, net.parameters(), create_graph=True) kl_grad = flat_grad(kl_grad) kl_grad_p = (kl_grad * p.detach()).sum() kl_hessian_p = torch.autograd.grad(kl_grad_p, net.parameters()) kl_hessian_p = flat_hessian(kl_hessian_p) return kl_hessian_p + cg_damp * p.detach() def conjugate_gradient(net, states, loss_grad, n_step=10, residual_tol=1e-10): x = torch.zeros(loss_grad.size()) r = loss_grad.clone() p = loss_grad.clone() r_dot_r = torch.dot(r, r) for i in range(n_step): A_dot_p = fisher_vector_product(net, states, p) alpha = r_dot_r / torch.dot(p, A_dot_p) x += alpha * p r -= alpha * A_dot_p new_r_dot_r = torch.dot(r, r) betta = new_r_dot_r / r_dot_r p = r + betta * p r_dot_r = new_r_dot_r if r_dot_r < residual_tol: break return x def flat_params(model): params = [] for param in model.parameters(): params.append(param.data.view(-1)) params_flatten = torch.cat(params) return params_flatten def update_model(model, new_params): index = 0 for params in model.parameters(): params_length = len(params.view(-1)) new_param = new_params[index:index + params_length] new_param = new_param.view(params.size()) params.data.copy_(new_param) index += params_length class TNPGNew(nn.Module): def __init__(self, num_inputs, num_outputs): super(TNPGNew, self).__init__() self.t = 0 self.num_inputs = num_inputs self.num_outputs = num_outputs self.fc_1 = nn.Linear(num_inputs, 128) self.fc_2 = nn.Linear(128, num_outputs) for m in self.modules(): if isinstance(m, nn.Linear): nn.init.xavier_uniform(m.weight) @classmethod def train_model(cls, net, transitions): states, actions, rewards, masks = (transitions.state, transitions. 
action, transitions.reward, transitions.mask) states = torch.stack(states) actions = torch.stack(actions) rewards = torch.Tensor(rewards) masks = torch.Tensor(masks) returns = torch.zeros_like(rewards) running_return = 0 for t in reversed(range(len(rewards))): running_return = rewards[t] + gamma * running_return * masks[t] returns[t] = running_return policies = net(states) policies = policies.view(-1, net.num_outputs) policy_actions = (policies * actions.detach()).sum(dim=1) loss = (policy_actions * returns).mean() loss_grad = torch.autograd.grad(loss, net.parameters()) loss_grad = flat_grad(loss_grad) step_dir = conjugate_gradient(net, states, loss_grad.data) params = flat_params(net) new_params = params + lr * step_dir update_model(net, new_params) return -loss def get_action(self, input): policy = self.forward(input) policy = policy[0].data.numpy() action = np.random.choice(self.num_outputs, 1, p=policy)[0] return action def forward(self, input_0): primals_1 = self.fc_1.weight primals_2 = self.fc_1.bias primals_4 = self.fc_2.weight primals_5 = self.fc_2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
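A hedged usage sketch for the compiled wrapper (my example, not from the repo): get_init_inputs/get_inputs in the reference listing imply a 4-input, 4-output policy fed a (4, 4, 4, 4) batch, and call() pins all buffers to cuda:0, so both the module and its input must live on the GPU. Note also that nn.init.xavier_uniform in __init__ is the deprecated alias of nn.init.xavier_uniform_ and emits a deprecation warning on recent PyTorch.

import torch

model = TNPGNew(num_inputs=4, num_outputs=4).cuda()
policy = model(torch.rand(4, 4, 4, 4, device='cuda'))
print(policy.shape)  # torch.Size([4, 4, 4, 4]); slices along dim 1 sum to 1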
g6ling/Pytorch-Cartpole
TNPG
false
15,387
[ "MIT" ]
116
ecb7b622cfefe825ac95388cceb6752413d90a2a
https://github.com/g6ling/Pytorch-Cartpole/tree/ecb7b622cfefe825ac95388cceb6752413d90a2a
TRPO
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/md/cmd3ewacyhu5w5hausgbjbmtnt5rr66cgczh4ibdypq7dz6p4v7g.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = 
xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py # Topologically Sorted Source Nodes: [policy], Original ATen: [aten._softmax] # Source node to ATen node mapping: # policy => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py # Topologically Sorted Source Nodes: [policy], Original ATen: [aten._softmax] # Source node to ATen node mapping: # policy => div, sum_1 # Graph fragment: # %sum_1 : 
[num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 8192, grid=grid(8192), stream=stream0) 
del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [policy], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [policy], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0) del buf3 return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), buf4, primals_4, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F def flat_grad(grads): grad_flatten = [] for grad in grads: grad_flatten.append(grad.view(-1)) grad_flatten = torch.cat(grad_flatten) return grad_flatten def flat_hessian(hessians): hessians_flatten = [] for hessian in hessians: hessians_flatten.append(hessian.contiguous().view(-1)) hessians_flatten = torch.cat(hessians_flatten).data return hessians_flatten def kl_divergence(policy, old_policy): kl = old_policy * torch.log(old_policy / policy) kl = kl.sum(1, keepdim=True) return kl def fisher_vector_product(net, states, p, cg_damp=0.1): policy = net(states) old_policy = net(states).detach() kl = kl_divergence(policy, old_policy) kl = kl.mean() kl_grad = torch.autograd.grad(kl, net.parameters(), create_graph=True) kl_grad = flat_grad(kl_grad) kl_grad_p = (kl_grad * p.detach()).sum() kl_hessian_p = torch.autograd.grad(kl_grad_p, net.parameters()) kl_hessian_p = flat_hessian(kl_hessian_p) return kl_hessian_p + cg_damp * p.detach() def conjugate_gradient(net, states, loss_grad, n_step=10, residual_tol=1e-10): x = torch.zeros(loss_grad.size()) r = loss_grad.clone() p = loss_grad.clone() r_dot_r = torch.dot(r, r) for i in range(n_step): A_dot_p = fisher_vector_product(net, states, p) alpha = r_dot_r / torch.dot(p, A_dot_p) x += alpha * p r -= alpha * A_dot_p new_r_dot_r = torch.dot(r, r) betta = new_r_dot_r / r_dot_r p = r + betta * p r_dot_r = new_r_dot_r if r_dot_r < residual_tol: break return x def flat_params(model): params = [] for param in model.parameters(): params.append(param.data.view(-1)) params_flatten = torch.cat(params) return params_flatten def update_model(model, new_params): index = 0 for params in model.parameters(): params_length = len(params.view(-1)) new_param = new_params[index:index + params_length] new_param = new_param.view(params.size()) params.data.copy_(new_param) index += params_length class TRPO(nn.Module): def __init__(self, num_inputs, num_outputs): super(TRPO, self).__init__() self.t = 0 self.num_inputs = num_inputs self.num_outputs = num_outputs self.fc_1 = nn.Linear(num_inputs, 128) self.fc_2 = nn.Linear(128, num_outputs) for m in self.modules(): if isinstance(m, nn.Linear): nn.init.xavier_uniform(m.weight) def forward(self, input): x = torch.relu(self.fc_1(input)) policy = F.softmax(self.fc_2(x)) return policy @classmethod def train_model(cls, net, transitions): states, actions, rewards, masks = (transitions.state, transitions. action, transitions.reward, transitions.mask) states = torch.stack(states) actions = torch.stack(actions) rewards = torch.Tensor(rewards) masks = torch.Tensor(masks) returns = torch.zeros_like(rewards) running_return = 0 for t in reversed(range(len(rewards))): running_return = rewards[t] + gamma * running_return * masks[t] returns[t] = running_return policy = net(states) policy = policy.view(-1, net.num_outputs) policy_action = (policy * actions.detach()).sum(dim=1) old_policy = net(states).detach() old_policy = old_policy.view(-1, net.num_outputs) old_policy_action = (old_policy * actions.detach()).sum(dim=1) surrogate_loss = (policy_action / old_policy_action * returns).mean() surrogate_loss_grad = torch.autograd.grad(surrogate_loss, net. 
parameters()) surrogate_loss_grad = flat_grad(surrogate_loss_grad) step_dir = conjugate_gradient(net, states, surrogate_loss_grad.data) params = flat_params(net) shs = (step_dir * fisher_vector_product(net, states, step_dir)).sum( 0, keepdim=True) step_size = torch.sqrt(2 * max_kl / shs)[0] full_step = step_size * step_dir fraction = 1.0 for _ in range(10): new_params = params + fraction * full_step update_model(net, new_params) policy = net(states) policy = policy.view(-1, net.num_outputs) policy_action = (policy * actions.detach()).sum(dim=1) surrogate_loss = (policy_action / old_policy_action * returns ).mean() kl = kl_divergence(policy, old_policy) kl = kl.mean() if kl < max_kl: break fraction = fraction * 0.5 return -surrogate_loss def get_action(self, input): policy = self.forward(input) policy = policy[0].data.numpy() action = np.random.choice(self.num_outputs, 1, p=policy)[0] return action def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_inputs': 4, 'num_outputs': 4}]
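kl_divergence above is the categorical KL divergence sum_a old(a) * log(old(a) / new(a)) computed row-wise; TRPO's backtracking loop halves fraction until this stays below max_kl (which, like gamma, is an undefined module-level constant in this listing). A quick check of the formula against torch.distributions (test harness is mine):

import torch
from torch.distributions import Categorical
from torch.distributions.kl import kl_divergence as torch_kl

old = torch.softmax(torch.randn(4, 4), dim=1)
new = torch.softmax(torch.randn(4, 4), dim=1)
manual = (old * torch.log(old / new)).sum(1)
reference = torch_kl(Categorical(probs=old), Categorical(probs=new))
assert torch.allclose(manual, reference, atol=1e-5)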
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) get_raw_stream(0) 
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1, primals_2, buf5, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 128), (128, 1), 0 ), buf4, primals_4, buf5 def flat_grad(grads): grad_flatten = [] for grad in grads: grad_flatten.append(grad.view(-1)) grad_flatten = torch.cat(grad_flatten) return grad_flatten def flat_hessian(hessians): hessians_flatten = [] for hessian in hessians: hessians_flatten.append(hessian.contiguous().view(-1)) hessians_flatten = torch.cat(hessians_flatten).data return hessians_flatten def kl_divergence(policy, old_policy): kl = old_policy * torch.log(old_policy / policy) kl = kl.sum(1, keepdim=True) return kl def fisher_vector_product(net, states, p, cg_damp=0.1): policy = net(states) old_policy = net(states).detach() kl = kl_divergence(policy, old_policy) kl = kl.mean() kl_grad = torch.autograd.grad(kl, net.parameters(), create_graph=True) kl_grad = flat_grad(kl_grad) kl_grad_p = (kl_grad * p.detach()).sum() kl_hessian_p = torch.autograd.grad(kl_grad_p, net.parameters()) kl_hessian_p = flat_hessian(kl_hessian_p) return kl_hessian_p + cg_damp * p.detach() def conjugate_gradient(net, states, loss_grad, n_step=10, residual_tol=1e-10): x = torch.zeros(loss_grad.size()) r = loss_grad.clone() p = loss_grad.clone() r_dot_r = torch.dot(r, r) for i in range(n_step): A_dot_p = fisher_vector_product(net, states, p) alpha = r_dot_r / torch.dot(p, A_dot_p) x += alpha * p r -= alpha * A_dot_p new_r_dot_r = torch.dot(r, r) betta = new_r_dot_r / r_dot_r p = r + betta * p r_dot_r = new_r_dot_r if r_dot_r < residual_tol: break return x def flat_params(model): params = [] for param in model.parameters(): params.append(param.data.view(-1)) params_flatten = torch.cat(params) return params_flatten def update_model(model, new_params): index = 0 for params in model.parameters(): params_length = len(params.view(-1)) new_param = new_params[index:index + params_length] new_param = new_param.view(params.size()) params.data.copy_(new_param) index += params_length class TRPONew(nn.Module): def __init__(self, num_inputs, num_outputs): super(TRPONew, self).__init__() self.t = 0 self.num_inputs = num_inputs self.num_outputs = num_outputs self.fc_1 = nn.Linear(num_inputs, 128) self.fc_2 = nn.Linear(128, num_outputs) for m in self.modules(): if isinstance(m, nn.Linear): nn.init.xavier_uniform(m.weight) @classmethod def train_model(cls, net, transitions): states, actions, rewards, masks = (transitions.state, transitions. 
action, transitions.reward, transitions.mask) states = torch.stack(states) actions = torch.stack(actions) rewards = torch.Tensor(rewards) masks = torch.Tensor(masks) returns = torch.zeros_like(rewards) running_return = 0 for t in reversed(range(len(rewards))): running_return = rewards[t] + gamma * running_return * masks[t] returns[t] = running_return policy = net(states) policy = policy.view(-1, net.num_outputs) policy_action = (policy * actions.detach()).sum(dim=1) old_policy = net(states).detach() old_policy = old_policy.view(-1, net.num_outputs) old_policy_action = (old_policy * actions.detach()).sum(dim=1) surrogate_loss = (policy_action / old_policy_action * returns).mean() surrogate_loss_grad = torch.autograd.grad(surrogate_loss, net. parameters()) surrogate_loss_grad = flat_grad(surrogate_loss_grad) step_dir = conjugate_gradient(net, states, surrogate_loss_grad.data) params = flat_params(net) shs = (step_dir * fisher_vector_product(net, states, step_dir)).sum( 0, keepdim=True) step_size = torch.sqrt(2 * max_kl / shs)[0] full_step = step_size * step_dir fraction = 1.0 for _ in range(10): new_params = params + fraction * full_step update_model(net, new_params) policy = net(states) policy = policy.view(-1, net.num_outputs) policy_action = (policy * actions.detach()).sum(dim=1) surrogate_loss = (policy_action / old_policy_action * returns ).mean() kl = kl_divergence(policy, old_policy) kl = kl.mean() if kl < max_kl: break fraction = fraction * 0.5 return -surrogate_loss def get_action(self, input): policy = self.forward(input) policy = policy[0].data.numpy() action = np.random.choice(self.num_outputs, 1, p=policy)[0] return action def forward(self, input_0): primals_1 = self.fc_1.weight primals_2 = self.fc_1.bias primals_4 = self.fc_2.weight primals_5 = self.fc_2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
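Compared with the TNPG listing, the only structural change in this compiled module is the first kernel: triton_poi_fused_relu_threshold_backward_0 fuses the bias add and ReLU with writing out the boolean mask (activation <= 0) that autograd's threshold backward later uses to zero incoming gradients, instead of keeping the pre-activation tensor alive. An eager sketch of the same two outputs:

import torch

def relu_with_backward_mask(x, bias):
    activation = torch.relu(x + bias)  # overwrites the matmul buffer in the kernel
    mask = activation <= 0             # saved for the backward pass (buf5 above)
    return activation, mask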
g6ling/Pytorch-Cartpole
TRPO
false
15,388
[ "MIT" ]
116
ecb7b622cfefe825ac95388cceb6752413d90a2a
https://github.com/g6ling/Pytorch-Cartpole/tree/ecb7b622cfefe825ac95388cceb6752413d90a2a
ResNetV2
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/a2/ca2l7bjxfwrklzvcxfa2hnyzqh3p6neak37vi6fkugdhbu26fbpz.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024, 64], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 768 xnumel = 49 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (49*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (147*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
# runs/run_shard_0/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 12
    xnumel = 4096
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = (yindex // 3)
    tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/co/ccosum7u5lx5fx5hf5opofiygxj2ntiq67yo5gfegevmhtkaru4r.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 65536
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = (yindex // 256)
    tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/qg/cqg4z653mpzmif22rwtpmv42y4lbkkxhxjqguwoxl3wb6cn5fn7k.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[262144, 16], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 262144
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 512
    y1 = (yindex // 512)
    tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/ot/cotn5a2cqhwvdw4ugt6b2a4jl2ou2mh37mnmwxgwogdqw4kcufhp.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1048576, 16], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 1048576
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 1024
    y1 = (yindex // 1024)
    tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (1024*x2) + (9216*y1)), tmp0, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/7n/c7npfx4cng24bae4uqu2hpgblpis6j6mmnvhinuzjms74o3kespg.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4194304, 16], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 4194304
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 2048
    y1 = (yindex // 2048)
    tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (2048*x2) + (18432*y1)), tmp0, xmask)
''', device_str='cuda')
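
# [Editor's note] The five kernels above carry no source-node mapping; from
# their index arithmetic they appear to be pure layout-repacking kernels that
# copy the input activations and the 3x3 convolution weights into
# channels-last storage before the convolutions run. A minimal eager-mode
# sketch of that transformation, under that assumption (the helper name is
# illustrative, not part of the generated code):
def _sketch_to_channels_last(x):
    # Reorder NCHW (or OIHW weight) storage to channels-last; the logical
    # shape is unchanged, only the strides and element order in memory move.
    return x.contiguous(memory_format=torch.channels_last)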
# kernel path: runs/run_shard_0/inductor_cache/ck/cck257ksuszi7rylew7fge2srrwr6phqjm3wbowg7merkmnxmshd.py
# Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, w], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
#   add => add
#   sqrt => sqrt
#   sub => sub
#   var_mean => var_mean
#   w => div
# Graph fragment:
#   %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-10), kwargs = {})
#   %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
#   %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_6 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[256, 256],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_6(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    rnumel = 147
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = rindex < rnumel
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + (147*x0)), rmask & xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(rmask & xmask, tmp1, 0)
    tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp6 = tl.where(rmask & xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = tl.full([XBLOCK, 1], 147, tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp15 = tl.where(rmask & xmask, tmp13, 0)
    tmp16 = tl.sum(tmp15, 1)[:, None]
    tmp17 = 147.0
    tmp18 = tmp16 / tmp17
    tmp19 = 1e-10
    tmp20 = tmp18 + tmp19
    tmp21 = libdevice.sqrt(tmp20)
    tmp22 = tmp0 - tmp10
    tmp23 = tmp22 / tmp21
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (x0), tmp21, xmask)
    tl.store(out_ptr1 + (r1 + (147*x0)), tmp23, rmask & xmask)
''', device_str='cuda')
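
# [Editor's note] Reference sketch of the fused reduction above, matching its
# graph fragment: per-filter Weight Standardization with population variance
# over dims (1, 2, 3) and eps = 1e-10. This is an editorial reconstruction
# for readability, not part of the generated code.
def _sketch_standardize_weight(w, eps=1e-10):
    # correction=0 mirrors the aten.var_mean.correction call above
    # (population variance, no Bessel correction).
    var, mean = torch.var_mean(w, dim=(1, 2, 3), keepdim=True, correction=0)
    return (w - mean) / torch.sqrt(var + eps)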
# kernel path: runs/run_shard_0/inductor_cache/nt/cntsn7m3r2xjxa4wdf4m34w7p5wrpvazgixessnri35z7hkzioyv.py
# Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
#   input_2 => constant_pad_nd
# Graph fragment:
#   %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%convolution, [1, 1, 1, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_7 = async_compile.triton('triton_poi_fused_constant_pad_nd_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[2097152],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1183744
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = (xindex // 8704) % 34
    x1 = (xindex // 256) % 34
    x3 = (xindex // 295936)
    x4 = xindex % 8704
    x6 = xindex
    tmp0 = (-1) + x2
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 32, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = (-1) + x1
    tmp6 = tmp5 >= tmp1
    tmp7 = tmp5 < tmp3
    tmp8 = tmp2 & tmp4
    tmp9 = tmp8 & tmp6
    tmp10 = tmp9 & tmp7
    tmp11 = tl.load(in_ptr0 + ((-8448) + x4 + (8192*x2) + (262144*x3)), tmp10, other=0.0)
    tl.store(out_ptr0 + (x6), tmp11, None)
''', device_str='cuda')
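
# [Editor's note] Eager-mode equivalent of the padding kernel above,
# reconstructed from the graph fragment: zero-pad H and W by one pixel on
# each side (32x32 -> 34x34) ahead of the 3x3 pooling window. Sketch only:
def _sketch_pad(x):
    return torch.nn.functional.pad(x, (1, 1, 1, 1), mode='constant', value=0.0)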
# kernel path: runs/run_shard_0/inductor_cache/bf/cbf5dxwqyr3hqcx4qflqp4smyzjh74okbtbp3q4x6lxhqo5bx6kt.py
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
#   input_3 => getitem_2, getitem_3
# Graph fragment:
#   %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
#   %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[262144],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 262144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 256
    x1 = (xindex // 256) % 16
    x2 = (xindex // 4096) % 16
    x3 = (xindex // 65536)
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
    tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
    tmp3 = tl.load(in_ptr0 + (512 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
    tmp5 = tl.load(in_ptr0 + (8704 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
    tmp7 = tl.load(in_ptr0 + (8960 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
    tmp9 = tl.load(in_ptr0 + (9216 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
    tmp11 = tl.load(in_ptr0 + (17408 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
    tmp13 = tl.load(in_ptr0 + (17664 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
    tmp15 = tl.load(in_ptr0 + (17920 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp8 = triton_helpers.maximum(tmp7, tmp6)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)
    tmp12 = triton_helpers.maximum(tmp11, tmp10)
    tmp14 = triton_helpers.maximum(tmp13, tmp12)
    tmp16 = triton_helpers.maximum(tmp15, tmp14)
    tmp17 = tmp1 > tmp0
    tmp18 = tl.full([1], 1, tl.int8)
    tmp19 = tl.full([1], 0, tl.int8)
    tmp20 = tl.where(tmp17, tmp18, tmp19)
    tmp21 = tmp3 > tmp2
    tmp22 = tl.full([1], 2, tl.int8)
    tmp23 = tl.where(tmp21, tmp22, tmp20)
    tmp24 = tmp5 > tmp4
    tmp25 = tl.full([1], 3, tl.int8)
    tmp26 = tl.where(tmp24, tmp25, tmp23)
    tmp27 = tmp7 > tmp6
    tmp28 = tl.full([1], 4, tl.int8)
    tmp29 = tl.where(tmp27, tmp28, tmp26)
    tmp30 = tmp9 > tmp8
    tmp31 = tl.full([1], 5, tl.int8)
    tmp32 = tl.where(tmp30, tmp31, tmp29)
    tmp33 = tmp11 > tmp10
    tmp34 = tl.full([1], 6, tl.int8)
    tmp35 = tl.where(tmp33, tmp34, tmp32)
    tmp36 = tmp13 > tmp12
    tmp37 = tl.full([1], 7, tl.int8)
    tmp38 = tl.where(tmp36, tmp37, tmp35)
    tmp39 = tmp15 > tmp14
    tmp40 = tl.full([1], 8, tl.int8)
    tmp41 = tl.where(tmp39, tmp40, tmp38)
    tl.store(out_ptr0 + (x4), tmp16, None)
    tl.store(out_ptr1 + (x4), tmp41, None)
''', device_str='cuda')
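
# [Editor's note] The kernel above fuses a 3x3, stride-2 max pool with its
# argmax bookkeeping: out_ptr0 receives the running maximum over the nine
# window taps, while out_ptr1 receives the winning tap's position as an int8
# in 0..8 (inductor's "low memory" offset encoding; eager PyTorch returns
# flat int64 indices instead). Sketch of the math only:
def _sketch_max_pool(x_padded):
    return torch.nn.functional.max_pool2d(
        x_padded, kernel_size=3, stride=2, return_indices=True)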
# kernel path: runs/run_shard_0/inductor_cache/v6/cv6xitrbrydxmqmrppdwzu4z4evlalp2yvk4c5vppkczm3j3ligg.py
# Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
#   group_norm => add_1, rsqrt, var_mean_1
# Graph fragment:
#   %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {})
#   %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
triton_red_fused_native_group_norm_9 = async_compile.triton('triton_red_fused_native_group_norm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.reduction(
    size_hints=[128, 2048],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_9(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
    xnumel = 128
    rnumel = 2048
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex % 32
    x1 = (xindex // 32)
    tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    x4 = xindex
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex % 8
        r3 = (rindex // 8)
        tmp0 = tl.load(in_ptr0 + (r2 + (8*x0) + (256*r3) + (65536*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
            tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
        )
        tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
        tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
        tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
    tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
        tmp2_mean, tmp2_m2, tmp2_weight, 1
    )
    tmp2 = tmp2_tmp[:, None]
    tmp3 = tmp3_tmp[:, None]
    tmp4 = tmp4_tmp[:, None]
    tl.store(out_ptr0 + (x4), tmp2, xmask)
    tl.store(out_ptr1 + (x4), tmp3, xmask)
    tmp5 = 2048.0
    tmp6 = tmp3 / tmp5
    tmp7 = 1e-05
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.rsqrt(tmp8)
    tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
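
# [Editor's note] The reduction above streams each (sample, group) slice
# through triton_helpers.welford_reduce, Welford's numerically stable online
# mean/variance update, and emits the group mean, the raw M2 accumulator, and
# rsqrt(var + 1e-05). An eager-mode sketch of the statistics it produces
# (32 groups, as implied by `xindex % 32`; helper name is illustrative):
def _sketch_group_stats(x, num_groups=32, eps=1e-05):
    n = x.shape[0]
    g = x.reshape(n, num_groups, -1)
    # Population variance over each group's channels and spatial positions.
    var, mean = torch.var_mean(g, dim=2, correction=0)
    return mean, torch.rsqrt(var + eps)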
# kernel path: runs/run_shard_0/inductor_cache/fa/cfasmvqd2wokec24hd5leef4rosb4wbws23u3moszdu6v4nwesae.py
# Topologically Sorted Source Nodes: [group_norm, out], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
#   group_norm => add_2, mul_1
#   out => relu
# Graph fragment:
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {})
#   %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_2,), kwargs = {})
triton_poi_fused_native_group_norm_relu_10 = async_compile.triton('triton_poi_fused_native_group_norm_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[262144],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 262144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x0 = xindex % 256
    x2 = (xindex // 65536)
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 8)), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 8)), None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = 2048.0
    tmp5 = tmp3 / tmp4
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp2 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tmp14 = tl.full([1], 0, tl.int32)
    tmp15 = triton_helpers.maximum(tmp14, tmp13)
    tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
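
# [Editor's note] The pointwise kernel above consumes those statistics and
# applies normalize -> per-channel affine (weight, bias) -> ReLU in a single
# pass. Eager-mode equivalent, an assumption consistent with the graph
# fragment (eps = 1e-05, 32 groups):
def _sketch_group_norm_relu(x, weight, bias, num_groups=32, eps=1e-05):
    return torch.relu(
        torch.nn.functional.group_norm(x, num_groups, weight, bias, eps))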
# kernel path: runs/run_shard_0/inductor_cache/7p/c7p6yuwjc2qmdlwhk2ntbb3enj6br3kuvouoikxqtnwnwwyibm2m.py
# Topologically Sorted Source Nodes: [var_mean_1, sub_1, add_1, sqrt_1, w_1], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
#   add_1 => add_3
#   sqrt_1 => sqrt_1
#   sub_1 => sub_2
#   var_mean_1 => var_mean_2
#   w_1 => div_1
# Graph fragment:
#   %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_5, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
#   %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_5, %getitem_7), kwargs = {})
#   %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-10), kwargs = {})
#   %sqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_3,), kwargs = {})
#   %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, %sqrt_1), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_11 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1024, 256],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_11(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel):
    xnumel = 1024
    XBLOCK: tl.constexpr = 1
    rnumel = 256
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + (256*x0)), None)
    tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
    tmp6 = tl.full([1], 256, tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 / tmp7
    tmp9 = tmp1 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
    tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
    tmp14 = 256.0
    tmp15 = tmp13 / tmp14
    tmp16 = 1e-10
    tmp17 = tmp15 + tmp16
    tmp18 = libdevice.sqrt(tmp17)
    tmp19 = tmp0 - tmp8
    tmp20 = tmp19 / tmp18
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (x0), tmp18, None)
    tl.store(out_ptr1 + (r1 + (256*x0)), tmp20, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/hy/chyilz3henugk2yh7bv56i2xnaqemzqpl6po2ro6wijtbh46vtdh.py
# Topologically Sorted Source Nodes: [var_mean_2, sub_2, add_2, sqrt_2, w_2], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
#   add_2 => add_4
#   sqrt_2 => sqrt_2
#   sub_2 => sub_3
#   var_mean_2 => var_mean_3
#   w_2 => div_2
# Graph fragment:
#   %var_mean_3 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_6, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
#   %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_6, %getitem_9), kwargs = {})
#   %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-10), kwargs = {})
#   %sqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_4,), kwargs = {})
#   %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_3, %sqrt_2), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_12 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[256, 256],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_12(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel):
    xnumel = 256
    XBLOCK: tl.constexpr = 1
    rnumel = 256
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + (256*x0)), None)
    tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
    tmp6 = tl.full([1], 256, tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 / tmp7
    tmp9 = tmp1 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
    tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
    tmp14 = 256.0
    tmp15 = tmp13 / tmp14
    tmp16 = 1e-10
    tmp17 = tmp15 + tmp16
    tmp18 = libdevice.sqrt(tmp17)
    tmp19 = tmp0 - tmp8
    tmp20 = tmp19 / tmp18
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (x0), tmp18, None)
    tl.store(out_ptr1 + (r1 + (256*x0)), tmp20, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/kp/ckptltlmncjptuq6aynv7jisgu7jzdkn64j44ats2eobzcfsf3lf.py
# Topologically Sorted Source Nodes: [var_mean_3, sub_3, add_3, sqrt_3, w_3], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
#   add_3 => add_7
#   sqrt_3 => sqrt_3
#   sub_3 => sub_5
#   var_mean_3 => var_mean_5
#   w_3 => div_3
# Graph fragment:
#   %var_mean_5 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_9, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
#   %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_9, %getitem_13), kwargs = {})
#   %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_12, 1e-10), kwargs = {})
#   %sqrt_3 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_7,), kwargs = {})
#   %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_5, %sqrt_3), kwargs = {})
triton_red_fused_add_div_sqrt_sub_var_mean_13 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.reduction(
    size_hints=[256, 4096],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_13(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
    xnumel = 256
    rnumel = 2304
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex
    tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp0 = tl.load(in_ptr0 + (r1 + (2304*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
            tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
        )
        tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
        tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
        tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
    tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
        tmp2_mean, tmp2_m2, tmp2_weight, 1
    )
    tmp2 = tmp2_tmp[:, None]
    tmp3 = tmp3_tmp[:, None]
    tmp4 = tmp4_tmp[:, None]
    tmp5 = 2304.0
    tmp6 = tmp3 / tmp5
    tmp7 = 1e-10
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.sqrt(tmp8)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (x0), tmp9, xmask)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp10 = tl.load(in_ptr0 + (r1 + (2304*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp11 = tmp10 - tmp2
        tmp12 = tmp11 / tmp9
        tl.store(out_ptr1 + (r1 + (2304*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
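
# [Editor's note] Unlike the persistent-reduction variants above (_6, _11,
# _12), this kernel standardizes weights whose reduction extent (rnumel =
# 2304 = 256*3*3) is too large for a single resident block, so inductor
# emits a looped reduction: the first loop accumulates mean/variance with
# Welford updates, and a second loop re-reads the weight tile to write out
# (w - mean) / sqrt(var + 1e-10).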
# kernel path: runs/run_shard_0/inductor_cache/fl/cflhmffcentzihxd2lm4f4c7jl6bncsabeh44cewr2fojudyc2k4.py
# Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.add]
# Source node to ATen node mapping:
#   input_4 => add_11
# Graph fragment:
#   %add_11 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_4, %convolution_1), kwargs = {})
triton_poi_fused_add_14 = async_compile.triton('triton_poi_fused_add_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1048576],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1048576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), None)
    tmp1 = tl.load(in_out_ptr0 + (x0), None)
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
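
# [Editor's note] The kernel above is the residual join of the first block,
# add_11 = convolution_4 + convolution_1 per the graph fragment, lowered to
# an in-place elementwise add that accumulates into one of the two conv
# output buffers. Eager-mode sketch (helper name is illustrative):
def _sketch_residual_add_(body, shortcut):
    # Accumulate the shortcut branch into the block body in place.
    return body.add_(shortcut)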
# kernel path: runs/run_shard_0/inductor_cache/si/csiiojtlgokw2jw42ubk2xbgw5jlgcxayafs3q7zswsfedxizwoc.py
# Topologically Sorted Source Nodes: [group_norm_3], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
#   group_norm_3 => add_12, rsqrt_3, var_mean_8
# Graph fragment:
#   %var_mean_8 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_6, [2, 3]), kwargs = {correction: 0, keepdim: True})
#   %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_18, 1e-05), kwargs = {})
#   %rsqrt_3 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_12,), kwargs = {})
triton_red_fused_native_group_norm_15 = async_compile.triton('triton_red_fused_native_group_norm_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.reduction(
    size_hints=[128, 8192],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_15(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
    xnumel = 128
    rnumel = 8192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex % 32
    x1 = (xindex // 32)
    tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    x4 = xindex
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex % 32
        r3 = (rindex // 32)
        tmp0 = tl.load(in_ptr0 + (r2 + (32*x0) + (1024*r3) + (262144*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
            tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
        )
        tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
        tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
        tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
    tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
        tmp2_mean, tmp2_m2, tmp2_weight, 1
    )
    tmp2 = tmp2_tmp[:, None]
    tmp3 = tmp3_tmp[:, None]
    tmp4 = tmp4_tmp[:, None]
    tl.store(out_ptr0 + (x4), tmp2, xmask)
    tl.store(out_ptr1 + (x4), tmp3, xmask)
    tmp5 = 8192.0
    tmp6 = tmp3 / tmp5
    tmp7 = 1e-05
    tmp8 = tmp6 + tmp7
    tmp9 = libdevice.rsqrt(tmp8)
    tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/pq/cpqmt4ehbn6is274yejx4zty2dzaa7qy7b6yhnpuy3tx6223wuua.py
# Topologically Sorted Source Nodes: [group_norm_3, out_4], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
#   group_norm_3 => add_13, mul_7
#   out_4 => relu_3
# Graph fragment:
#   %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_7, %unsqueeze_23), kwargs = {})
#   %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %unsqueeze_20), kwargs = {})
#   %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_13,), kwargs = {})
triton_poi_fused_native_group_norm_relu_16 = async_compile.triton('triton_poi_fused_native_group_norm_relu_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1048576],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1048576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x0 = xindex % 1024
    x2 = (xindex // 262144)
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = 8192.0
    tmp5 = tmp3 / tmp4
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp2 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tmp14 = tl.full([1], 0, tl.int32)
    tmp15 = triton_helpers.maximum(tmp14, tmp13)
    tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rf/crfnkyefyns5oxfvn3xcagl2davuc4ecbrrklayhwc5y4v54ackv.py
# Topologically Sorted Source Nodes: [var_mean_5, sub_5, add_6, sqrt_5, w_5], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
#   add_6 => add_14
#   sqrt_5 => sqrt_5
#   sub_5 => sub_9
#   var_mean_5 => var_mean_9
#   w_5 => div_5
# Graph fragment:
#   %var_mean_9 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_15, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
#   %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_15, %getitem_21), kwargs = {})
#   %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_20, 1e-10), kwargs = {})
#   %sqrt_5 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_14,), kwargs = {})
#   %div_5 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_9, %sqrt_5), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_17 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[256, 1024],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_17', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_17(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel):
    xnumel = 256
    XBLOCK: tl.constexpr = 1
    rnumel = 1024
    RBLOCK: tl.constexpr = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None)
    tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
    tmp6 = tl.full([1], 1024, tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 / tmp7
    tmp9 = tmp1 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
    tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
    tmp14 = 1024.0
    tmp15 = tmp13 / tmp14
    tmp16 = 1e-10
    tmp17 = tmp15 + tmp16
    tmp18 = libdevice.sqrt(tmp17)
    tmp19 = tmp0 - tmp8
    tmp20 = tmp19 / tmp18
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (x0), tmp18, None)
    tl.store(out_ptr1 + (r1 + (1024*x0)), tmp20, None)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/2i/c2iroi5tyq6bdv2nnl6vpicnvrvjqphovygeuprbla3wfoavamb7.py
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.add]
# Source node to ATen node mapping:
#   input_5 => add_21
# Graph fragment:
#   %add_21 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_7, %add_11), kwargs = {})
triton_poi_fused_add_18 = async_compile.triton('triton_poi_fused_add_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1048576],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_18', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1048576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + (x0), None)
    tmp1 = tl.load(in_ptr0 + (x0), None)
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6a/c6aj2cpzdg3dhbfhkkua4xiwtcie4yfjb4mh66com5kropbmjwni.py
# Topologically Sorted Source Nodes: [var_mean_14, sub_14, add_18, sqrt_14, w_14], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
#   add_18 => add_44
#   sqrt_14 => sqrt_14
#   sub_14 => sub_27
#   var_mean_14 => var_mean_27
#   w_14 => div_14
# Graph fragment:
#   %var_mean_27 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_42, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
#   %sub_27 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_42, %getitem_57), kwargs = {})
#   %add_44 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_56, 1e-10), kwargs = {})
#   %sqrt_14 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_44,), kwargs = {})
#   %div_14 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_27, %sqrt_14), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_19 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[2048, 1024],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_19', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_19(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel):
    xnumel = 2048
    XBLOCK: tl.constexpr = 1
    rnumel = 1024
    RBLOCK: tl.constexpr = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None)
    tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
    tmp6 = tl.full([1], 1024, tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 / tmp7
    tmp9 = tmp1 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
    tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
    tmp14 = 1024.0
    tmp15 = tmp13 / tmp14
    tmp16 = 1e-10
    tmp17 = tmp15 + tmp16
    tmp18 = libdevice.sqrt(tmp17)
    tmp19 = tmp0 - tmp8
    tmp20 = tmp19 / tmp18
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (x0), tmp18, None)
    tl.store(out_ptr1 + (r1 + (1024*x0)), tmp20, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/o6/co6t5ttpyhicnphta2nl3sop7ipjogaapb57hy7c7z6oibxmwpd7.py
# Topologically Sorted Source Nodes: [var_mean_15, sub_15, add_19, sqrt_15, w_15], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
#   add_19 => add_45
#   sqrt_15 => sqrt_15
#   sub_15 => sub_28
#   var_mean_15 => var_mean_28
#   w_15 => div_15
# Graph fragment:
#   %var_mean_28 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_43, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
#   %sub_28 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_43, %getitem_59), kwargs = {})
#   %add_45 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_58, 1e-10), kwargs = {})
#   %sqrt_15 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_45,), kwargs = {})
#   %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_28, %sqrt_15), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_20 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[512, 1024],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_20', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_20(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel):
    xnumel = 512
    XBLOCK: tl.constexpr = 1
    rnumel = 1024
    RBLOCK: tl.constexpr = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None)
    tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
    tmp6 = tl.full([1], 1024, tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 / tmp7
    tmp9 = tmp1 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
    tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
    tmp14 = 1024.0
    tmp15 = tmp13 / tmp14
    tmp16 = 1e-10
    tmp17 = tmp15 + tmp16
    tmp18 = libdevice.sqrt(tmp17)
    tmp19 = tmp0 - tmp8
    tmp20 = tmp19 / tmp18
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (x0), tmp18, None)
    tl.store(out_ptr1 + (r1 + (1024*x0)), tmp20, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/am/cam45fkuyx7dbg52ucmhl65jnrvcnlo7pyx6a2j5lvsd5bviopuj.py
# Topologically Sorted Source Nodes: [group_norm_13], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
#   group_norm_13 => add_46, rsqrt_13, var_mean_29
# Graph fragment:
#   %var_mean_29 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_26, [2, 3]), kwargs = {correction: 0, keepdim: True})
#   %add_46 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_60, 1e-05), kwargs = {})
#   %rsqrt_13 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_46,), kwargs = {})
triton_red_fused_native_group_norm_21 = async_compile.triton('triton_red_fused_native_group_norm_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.reduction(
    size_hints=[128, 4096],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_21(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
    xnumel = 128
    rnumel = 4096
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex % 32
    x1 = (xindex // 32)
    tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
    x4 = xindex
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex % 16
        r3 = (rindex // 16)
        tmp0 = tl.load(in_ptr0 + (r2 + (16*x0) + (512*r3) + (131072*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
            tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
        )
        tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
        tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tl.store(out_ptr0 + (x4), tmp2, xmask) tl.store(out_ptr1 + (x4), tmp3, xmask) tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/fy/cfy57czzc2qkjv6pjz77rgn32p5crruqb55nl4xixwalsbs5mwei.py # Topologically Sorted Source Nodes: [group_norm_13, relu_13], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # group_norm_13 => add_47, mul_27 # relu_13 => relu_13 # Graph fragment: # %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_27, %unsqueeze_83), kwargs = {}) # %add_47 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_27, %unsqueeze_80), kwargs = {}) # %relu_13 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_47,), kwargs = {}) triton_poi_fused_native_group_norm_relu_22 = async_compile.triton('triton_poi_fused_native_group_norm_relu_22', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_22(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 512 x2 = (xindex // 131072) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 4096.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 
= tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/uo/cuodaulb624waag556poxfsnkpkczmwmuq2d6mr6bn4hx4wwlfjh.py # Topologically Sorted Source Nodes: [var_mean_16, sub_16, add_20, sqrt_16, w_16], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_20 => add_48 # sqrt_16 => sqrt_16 # sub_16 => sub_30 # var_mean_16 => var_mean_30 # w_16 => div_16 # Graph fragment: # %var_mean_30 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_46, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_30 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_46, %getitem_63), kwargs = {}) # %add_48 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_62, 1e-10), kwargs = {}) # %sqrt_16 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_48,), kwargs = {}) # %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_30, %sqrt_16), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_23 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_23', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[512, 8192], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_23', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_23(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 512 rnumel = 4608 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (4608*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, 
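# [annotation] Unlike the persistent variant above (suffix _20), rnumel = 4608
# here (plausibly a 3x3 conv weight: 512 * 3 * 3 elements per filter) exceeds a
# single RBLOCK, so inductor generates a looped, two-pass reduction: the first
# loop accumulates Welford statistics chunk by chunk, and a second loop
# re-reads the input to write out the standardized weight.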
tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 4608.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (4608*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (4608*x0)), tmp12, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7q/c7qjvpiszscw2ipcio4ffhrmgfbubit4radh6syg4zqvckqbz52z.py # Topologically Sorted Source Nodes: [group_norm_14], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # group_norm_14 => add_49, rsqrt_14, var_mean_31 # Graph fragment: # %var_mean_31 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_28, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_49 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_64, 1e-05), kwargs = {}) # %rsqrt_14 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_49,), kwargs = {}) triton_per_fused_native_group_norm_24 = async_compile.triton('triton_per_fused_native_group_norm_24', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[128, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_24', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_24(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): xnumel = 128 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, 
RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r2 = rindex % 16 r3 = (rindex // 16) x0 = xindex % 32 x1 = (xindex // 32) x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + (16*x0) + (512*r3) + (32768*x1)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + (x4), tmp18, None) tl.store(out_ptr0 + (x4), tmp8, None) tl.store(out_ptr1 + (x4), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ad/cadjzg3pzepovud42t5i6gbftlolabw3fp27unoow5hoyvjliyaa.py # Topologically Sorted Source Nodes: [group_norm_14, relu_14], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # group_norm_14 => add_50, mul_29 # relu_14 => relu_14 # Graph fragment: # %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_29, %unsqueeze_89), kwargs = {}) # %add_50 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_29, %unsqueeze_86), kwargs = {}) # %relu_14 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_50,), kwargs = {}) triton_poi_fused_native_group_norm_relu_25 = async_compile.triton('triton_poi_fused_native_group_norm_relu_25', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_25', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_25(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 512 x2 = (xindex // 32768) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp3 = 
tl.load(in_ptr2 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1024.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mr/cmru36jvazn4nvftmbz3p4esd55x5bjsn5arm2fs34ru32eusb5g.py # Topologically Sorted Source Nodes: [var_mean_17, sub_17, add_21, sqrt_17, w_17], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_21 => add_51 # sqrt_17 => sqrt_17 # sub_17 => sub_32 # var_mean_17 => var_mean_32 # w_17 => div_17 # Graph fragment: # %var_mean_32 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_49, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_32 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_49, %getitem_67), kwargs = {}) # %add_51 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_66, 1e-10), kwargs = {}) # %sqrt_17 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_51,), kwargs = {}) # %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_32, %sqrt_17), kwargs = {}) triton_per_fused_add_div_sqrt_sub_var_mean_26 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_26', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[2048, 512], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_26', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_26(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): xnumel = 2048 XBLOCK: tl.constexpr = 1 rnumel = 512 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], 
True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (512*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 512, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 512.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-10 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp18, None) tl.store(out_ptr1 + (r1 + (512*x0)), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/2c/c2c52oqydx2qqnye3ms2nhtnjykxyv6xutsiklmey66ax3u3kxre.py # Topologically Sorted Source Nodes: [input_8], Original ATen: [aten.add] # Source node to ATen node mapping: # input_8 => add_52 # Graph fragment: # %add_52 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_17, %convolution_14), kwargs = {}) triton_poi_fused_add_27 = async_compile.triton('triton_poi_fused_add_27', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_27', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_27(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), None) tmp1 = tl.load(in_out_ptr0 + (x0), None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x0), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bz/cbzasy4ze2re4vldncooexegruzzqnk6h26e2lq65kivzh26krpx.py # Topologically Sorted Source Nodes: [group_norm_15], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # group_norm_15 => add_53, rsqrt_15, var_mean_33 # Graph fragment: # %var_mean_33 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_30, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_53 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_68, 1e-05), kwargs 
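# [annotation] triton_poi_fused_add_27 above implements the residual
# connection input_8 = convolution_17 + convolution_14 as a single in-place
# elementwise add (in_out_ptr0 is a mutated arg). Roughly, in eager terms:
#
#   out = branch(x) + shortcut(x)   # fused here as out_buf += other_buf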
= {}) # %rsqrt_15 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_53,), kwargs = {}) triton_red_fused_native_group_norm_28 = async_compile.triton('triton_red_fused_native_group_norm_28', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[128, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_28', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_native_group_norm_28(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 128 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = (xindex // 32) tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 64 r3 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r2 + (64*x0) + (2048*r3) + (131072*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tl.store(out_ptr0 + (x4), tmp2, xmask) tl.store(out_ptr1 + (x4), tmp3, xmask) tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ai/caiz2ipqaqy3yvfxd23r6kgede3xfyove4vfzoze622iyk6yxnob.py # Topologically Sorted Source Nodes: [group_norm_15, out_20], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # group_norm_15 => add_54, mul_31 # out_20 => relu_15 # Graph 
fragment: # %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_31, %unsqueeze_95), kwargs = {}) # %add_54 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_31, %unsqueeze_92), kwargs = {}) # %relu_15 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_54,), kwargs = {}) triton_poi_fused_native_group_norm_relu_29 = async_compile.triton('triton_poi_fused_native_group_norm_relu_29', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_29', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_29(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 2048 x2 = (xindex // 131072) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 4096.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/q7/cq7qfvsx4rl6ppxrincqirqseiykkhanw5f6ciztpudikgkuztk7.py # Topologically Sorted Source Nodes: [var_mean_18, sub_18, add_23, sqrt_18, w_18], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_23 => add_55 # sqrt_18 => sqrt_18 # sub_18 => sub_34 # var_mean_18 => var_mean_34 # w_18 => div_18 # Graph fragment: # %var_mean_34 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_52, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_34 : [num_users=1] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_52, %getitem_71), kwargs = {}) # %add_55 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_70, 1e-10), kwargs = {}) # %sqrt_18 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_55,), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_34, %sqrt_18), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_30 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_30', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[512, 2048], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_30', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_30(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 512 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 
(2048*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (2048*x0)), tmp12, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/46/c46uzwtkm72q554z66kfflo66d3jxej542mlrkn5kejeivkb77fl.py # Topologically Sorted Source Nodes: [input_9], Original ATen: [aten.add] # Source node to ATen node mapping: # input_9 => add_62 # Graph fragment: # %add_62 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_20, %add_52), kwargs = {}) triton_poi_fused_add_31 = async_compile.triton('triton_poi_fused_add_31', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_31', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_31(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.load(in_ptr0 + (x0), None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x0), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kt/cktrt7d4uohqooff3hbktlyw3jxflx7srzvymlnrkdul5brzhp4v.py # Topologically Sorted Source Nodes: [var_mean_27, sub_27, add_35, sqrt_27, w_27], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_35 => add_85 # sqrt_27 => sqrt_27 # sub_27 => sub_52 # var_mean_27 => var_mean_52 # w_27 => div_27 # Graph fragment: # %var_mean_52 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_79, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_52 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_79, %getitem_107), kwargs = {}) # %add_85 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_106, 1e-10), kwargs = {}) # %sqrt_27 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_85,), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_52, %sqrt_27), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_32 = 
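# [annotation] The kernel defined below standardizes a weight with xnumel =
# 4096 filters and rnumel = 2048 reduced elements per filter (C_in * kH * kW,
# consistent with a 1x1 convolution from 2048 channels). Note the all-true
# xmask: 4096 is divisible by every candidate XBLOCK, so inductor can
# presumably drop the usual `xindex < xnumel` bounds check here.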
async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_32', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[4096, 2048], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_32', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_32(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 4096 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, None) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (2048*x0)), tmp12, rmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/2p/c2pa75h6vcympkonevngx7jgya3va4wrjf32c2j6odcqubxn55kq.py # Topologically Sorted Source Nodes: [var_mean_28, sub_28, add_36, sqrt_28, w_28], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_36 => add_86 # sqrt_28 
=> sqrt_28 # sub_28 => sub_53 # var_mean_28 => var_mean_53 # w_28 => div_28 # Graph fragment: # %var_mean_53 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_80, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_53 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_80, %getitem_109), kwargs = {}) # %add_86 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_108, 1e-10), kwargs = {}) # %sqrt_28 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_86,), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_53, %sqrt_28), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_33 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_33', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[1024, 2048], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_33', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_33(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 1024 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 
2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (2048*x0)), tmp12, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/pk/cpkn7dyjdvgjdsd22adrasuzoh2m2mcmlbhqoa355to4spbmfc6o.py # Topologically Sorted Source Nodes: [group_norm_25], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # group_norm_25 => add_87, rsqrt_25, var_mean_54 # Graph fragment: # %var_mean_54 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_50, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_87 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_110, 1e-05), kwargs = {}) # %rsqrt_25 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_87,), kwargs = {}) triton_red_fused_native_group_norm_34 = async_compile.triton('triton_red_fused_native_group_norm_34', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[128, 2048], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_34', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_native_group_norm_34(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 128 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = (xindex // 32) tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 32 r3 = (rindex // 32) tmp0 = tl.load(in_ptr0 + (r2 + (32*x0) + (1024*r3) + (65536*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, 
RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tl.store(out_ptr0 + (x4), tmp2, xmask) tl.store(out_ptr1 + (x4), tmp3, xmask) tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/do/cdosiy5sv6smtosgbknn2mualigm45xk4rnnmiei2dbhnr6jbcyh.py # Topologically Sorted Source Nodes: [group_norm_25, relu_25], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # group_norm_25 => add_88, mul_51 # relu_25 => relu_25 # Graph fragment: # %mul_51 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_51, %unsqueeze_155), kwargs = {}) # %add_88 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_51, %unsqueeze_152), kwargs = {}) # %relu_25 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_88,), kwargs = {}) triton_poi_fused_native_group_norm_relu_35 = async_compile.triton('triton_poi_fused_native_group_norm_relu_35', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_35', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_35(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = (xindex // 65536) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last') tmp10 = 
tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 2048.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kz/ckz3sqktbzpz5oi5pps4ah7ojvp5n77g7w5alzx4cdklrbcqjcp7.py # Topologically Sorted Source Nodes: [var_mean_29, sub_29, add_37, sqrt_29, w_29], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_37 => add_89 # sqrt_29 => sqrt_29 # sub_29 => sub_55 # var_mean_29 => var_mean_55 # w_29 => div_29 # Graph fragment: # %var_mean_55 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_83, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_55 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_83, %getitem_113), kwargs = {}) # %add_89 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_112, 1e-10), kwargs = {}) # %sqrt_29 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_89,), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_55, %sqrt_29), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_36 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_36', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[1024, 16384], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_36', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_36(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 1024 rnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = 
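# [annotation] This reduction standardizes a 1024-filter weight with rnumel =
# 9216 elements per filter, i.e. presumably 1024 input channels * 3 * 3: the
# 3x3 conv of the widest stage. Same two-pass scheme as the _23 kernel above:
# Welford stats first, then a second read of in_ptr0 to emit
# (w - mean) / sqrt(var + 1e-10).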
tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (9216*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 9216.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (9216*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (9216*x0)), tmp12, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/u5/cu5qbvq7rhsj5qzjpgn4u3l7m2flsdl6jv6rsorhhef7z7t5rpeu.py # Topologically Sorted Source Nodes: [group_norm_26], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # group_norm_26 => add_90, rsqrt_26, var_mean_56 # Graph fragment: # %var_mean_56 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_52, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_90 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_114, 1e-05), kwargs = {}) # %rsqrt_26 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_90,), kwargs = {}) triton_per_fused_native_group_norm_37 = async_compile.triton('triton_per_fused_native_group_norm_37', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[128, 512], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_37', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def 
triton_per_fused_native_group_norm_37(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): xnumel = 128 XBLOCK: tl.constexpr = 1 rnumel = 512 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r2 = rindex % 32 r3 = (rindex // 32) x0 = xindex % 32 x1 = (xindex // 32) x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + (32*x0) + (1024*r3) + (16384*x1)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 512, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 512.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + (x4), tmp18, None) tl.store(out_ptr0 + (x4), tmp8, None) tl.store(out_ptr1 + (x4), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ne/cne7llmjajc2ac624wbhyotx4of4ba24zgfkmy5mj6hluta3oajy.py # Topologically Sorted Source Nodes: [group_norm_26, relu_26], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # group_norm_26 => add_91, mul_53 # relu_26 => relu_26 # Graph fragment: # %mul_53 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_53, %unsqueeze_161), kwargs = {}) # %add_91 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_53, %unsqueeze_158), kwargs = {}) # %relu_26 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_91,), kwargs = {}) triton_poi_fused_native_group_norm_relu_38 = async_compile.triton('triton_poi_fused_native_group_norm_relu_38', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_38', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_38(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 
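    # [annotation] Pointwise indexing in this kernel: xindex enumerates all
    # N*C*H*W elements; x0 = xindex % 1024 is the channel and x2 = xindex // 16384
    # the sample (C = 1024, H*W = 16). The per-(sample, group) statistics are then
    # fetched with (32*x2) + (x0 // 32), i.e. 32 groups of 32 channels. For a
    # contiguous NCHW tensor this is, schematically:
    #
    #   channel = flat_index % C
    #   sample  = flat_index // (C * H * W)
    #   group   = channel // (C // num_groups)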
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = (xindex // 16384) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 512.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/pw/cpwuyihukeko3sz2mange4jiyuc6hegivng7t6xlmjwzff2jslfm.py # Topologically Sorted Source Nodes: [var_mean_30, sub_30, add_38, sqrt_30, w_30], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_38 => add_92 # sqrt_30 => sqrt_30 # sub_30 => sub_57 # var_mean_30 => var_mean_57 # w_30 => div_30 # Graph fragment: # %var_mean_57 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_86, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_57 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_86, %getitem_117), kwargs = {}) # %add_92 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_116, 1e-10), kwargs = {}) # %sqrt_30 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_92,), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_57, %sqrt_30), kwargs = {}) triton_per_fused_add_div_sqrt_sub_var_mean_39 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_39', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4096, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_39', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_39(in_out_ptr0, 
in_ptr0, out_ptr1, xnumel, rnumel): xnumel = 4096 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-10 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp18, None) tl.store(out_ptr1 + (r1 + (1024*x0)), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kt/cktxvckhfax7pjim22s6n5uzucjshabg76bthqfv5bt7xvx2rg7v.py # Topologically Sorted Source Nodes: [input_12], Original ATen: [aten.add] # Source node to ATen node mapping: # input_12 => add_93 # Graph fragment: # %add_93 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_30, %convolution_27), kwargs = {}) triton_poi_fused_add_40 = async_compile.triton('triton_poi_fused_add_40', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_40', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_40(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), None) tmp1 = tl.load(in_out_ptr0 + (x0), None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x0), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3z/c3zgsvchcnqibeqy5jdmcbb664t2dc7anoixlyfsbcbmr7q6etbj.py # Topologically Sorted Source Nodes: [group_norm_27], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # group_norm_27 => 
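# Editor's note (added): triton_poi_fused_add_40 above implements the residual
# connection of the block -- an elementwise shortcut add over the flattened
# activation, mutating its buffer in place (in_out_ptr0); the eager-mode
# equivalent is simply `out = conv_out + identity`. The group_norm_27 node
# mapping continues below.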
add_94, rsqrt_27, var_mean_58 # Graph fragment: # %var_mean_58 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_54, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_94 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_118, 1e-05), kwargs = {}) # %rsqrt_27 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_94,), kwargs = {}) triton_red_fused_native_group_norm_41 = async_compile.triton('triton_red_fused_native_group_norm_41', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[128, 2048], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_41', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_native_group_norm_41(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 128 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = (xindex // 32) tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 128 r3 = (rindex // 128) tmp0 = tl.load(in_ptr0 + (r2 + (128*x0) + (4096*r3) + (65536*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tl.store(out_ptr0 + (x4), tmp2, xmask) tl.store(out_ptr1 + (x4), tmp3, xmask) tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + (x4), tmp9, xmask) ''', device_str='cuda') # 
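# ---------------------------------------------------------------------------
# Editor's note (added): reduction kernels like the one above
# (triton_red_fused_native_group_norm_41) stream over the 2048-element
# reduction extent in RBLOCK-sized chunks and merge partial (mean, m2, weight)
# triples with Welford's algorithm (triton_helpers.welford_reduce / welford),
# which is numerically stabler than a naive sum / sum-of-squares pass. Below
# is a minimal eager-mode sketch of the same per-group statistics -- a
# hypothetical reference helper, never called by the generated graph;
# num_groups=32 matches the group-norm kernels in this file.
def _reference_group_norm_stats(x, num_groups=32, eps=1e-05):
    # Collapse each group's channels together with the spatial extent,
    # mirroring var_mean over dims [2, 3] of the grouped view.
    n = x.shape[0]
    g = x.reshape(n, num_groups, -1)
    mean = g.mean(dim=2, keepdim=True)
    var = g.var(dim=2, unbiased=False, keepdim=True)  # correction=0, as in the graph
    return mean, var, (var + eps).rsqrt()
# ---------------------------------------------------------------------------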
kernel path: runs/run_shard_0/inductor_cache/5j/c5jagcpyiyis7hpqnt5po5ouog6tiegjmrxnuvik36qafxw6ph47.py # Topologically Sorted Source Nodes: [group_norm_27, out_36], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # group_norm_27 => add_95, mul_55 # out_36 => relu_27 # Graph fragment: # %mul_55 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_55, %unsqueeze_167), kwargs = {}) # %add_95 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_55, %unsqueeze_164), kwargs = {}) # %relu_27 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_95,), kwargs = {}) triton_poi_fused_native_group_norm_relu_42 = async_compile.triton('triton_poi_fused_native_group_norm_relu_42', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_42', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_42(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = (xindex // 65536) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 128)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 128)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 2048.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/5c/c5cnjqjnvm5kgsecw54s3qbswpzb3ndbzzivmfbmbr5ek3n7tfho.py # Topologically Sorted Source Nodes: [var_mean_31, sub_31, add_40, sqrt_31, w_31], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # 
add_40 => add_96 # sqrt_31 => sqrt_31 # sub_31 => sub_59 # var_mean_31 => var_mean_59 # w_31 => div_31 # Graph fragment: # %var_mean_59 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_89, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_59 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_89, %getitem_121), kwargs = {}) # %add_96 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_120, 1e-10), kwargs = {}) # %sqrt_31 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_96,), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_59, %sqrt_31), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_43 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_43', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[1024, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_43', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_43(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 1024 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = 
tmp4_tmp[:, None] tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (4096*x0)), tmp12, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/dl/cdlkdgo2g6uuwzzliq3ols47a2pt4qrqa2hxzxqniidji2r3opyh.py # Topologically Sorted Source Nodes: [input_13], Original ATen: [aten.add] # Source node to ATen node mapping: # input_13 => add_103 # Graph fragment: # %add_103 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_33, %add_93), kwargs = {}) triton_poi_fused_add_44 = async_compile.triton('triton_poi_fused_add_44', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_44', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_44(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.load(in_ptr0 + (x0), None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x0), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/p2/cp2pkyyfmqpfiixnv3jjjvia7zvprrskyyin2oz4s5h4td3uobbn.py # Topologically Sorted Source Nodes: [var_mean_40, sub_40, add_52, sqrt_40, w_40], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_52 => add_126 # sqrt_40 => sqrt_40 # sub_40 => sub_77 # var_mean_40 => var_mean_77 # w_40 => div_40 # Graph fragment: # %var_mean_77 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_116, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_77 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_116, %getitem_157), kwargs = {}) # %add_126 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = 
(%getitem_156, 1e-10), kwargs = {}) # %sqrt_40 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_126,), kwargs = {}) # %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_77, %sqrt_40), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_45 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_45', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[8192, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_45', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_45(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 8192 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (4096*x0)), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, None) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (4096*x0)), rmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (4096*x0)), tmp12, rmask) ''', device_str='cuda') # kernel path: 
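# ---------------------------------------------------------------------------
# Editor's note (added): the recurring var_mean/sub/add/sqrt/div kernels, like
# triton_red_fused_add_div_sqrt_sub_var_mean_45 above, standardize each
# convolution filter over its fan-in (per-output-channel mean and variance
# with correction=0, eps = 1e-10) before the convolution runs -- the
# weight-standardization pattern. A hypothetical eager-mode equivalent,
# for reference only:
def _reference_standardize_weight(w, eps=1e-10):
    # w: (out_channels, in_channels, kh, kw); reduce over dims [1, 2, 3].
    var, mean = torch.var_mean(w, dim=[1, 2, 3], correction=0, keepdim=True)
    return (w - mean) / torch.sqrt(var + eps)
# ---------------------------------------------------------------------------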
runs/run_shard_0/inductor_cache/7v/c7vndo53sxch2xfxk2j225jshkuji44jqxb3kasd35ift5bl4cqu.py # Topologically Sorted Source Nodes: [var_mean_41, sub_41, add_53, sqrt_41, w_41], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_53 => add_127 # sqrt_41 => sqrt_41 # sub_41 => sub_78 # var_mean_41 => var_mean_78 # w_41 => div_41 # Graph fragment: # %var_mean_78 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_117, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_78 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_117, %getitem_159), kwargs = {}) # %add_127 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_158, 1e-10), kwargs = {}) # %sqrt_41 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_127,), kwargs = {}) # %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_78, %sqrt_41), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_46 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_46', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[2048, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_46', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_46(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 2048 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (4096*x0)), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask, 
tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, None) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (4096*x0)), rmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (4096*x0)), tmp12, rmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/c4/cc4m7vudmm3cnmrdoq6nrxav3dfryhniaztwf56ak3xwbsoexd6j.py # Topologically Sorted Source Nodes: [group_norm_37], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # group_norm_37 => add_128, rsqrt_37, var_mean_79 # Graph fragment: # %var_mean_79 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_74, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_128 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_160, 1e-05), kwargs = {}) # %rsqrt_37 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_128,), kwargs = {}) triton_per_fused_native_group_norm_47 = async_compile.triton('triton_per_fused_native_group_norm_47', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[128, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_47', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_47(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): xnumel = 128 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r2 = rindex % 64 r3 = (rindex // 64) x0 = xindex % 32 x1 = (xindex // 32) x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + (64*x0) + 
(2048*r3) + (32768*x1)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + (x4), tmp18, None) tl.store(out_ptr0 + (x4), tmp8, None) tl.store(out_ptr1 + (x4), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/c4/cc4yjngdidzi6e5bjkxxkejgcpj7dbiephcer54rlmajddthlte7.py # Topologically Sorted Source Nodes: [group_norm_37, relu_37], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # group_norm_37 => add_129, mul_75 # relu_37 => relu_37 # Graph fragment: # %mul_75 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_75, %unsqueeze_227), kwargs = {}) # %add_129 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_75, %unsqueeze_224), kwargs = {}) # %relu_37 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_129,), kwargs = {}) triton_poi_fused_native_group_norm_relu_48 = async_compile.triton('triton_poi_fused_native_group_norm_relu_48', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_48', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_48(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 2048 x2 = (xindex // 32768) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, 
eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1024.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/pg/cpgx5ntjxzrjpuwcb4xicqlcvjnedc26m62m4ypjmwoyvd3hvmgj.py # Topologically Sorted Source Nodes: [var_mean_42, sub_42, add_54, sqrt_42, w_42], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_54 => add_130 # sqrt_42 => sqrt_42 # sub_42 => sub_80 # var_mean_42 => var_mean_80 # w_42 => div_42 # Graph fragment: # %var_mean_80 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_120, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_80 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_120, %getitem_163), kwargs = {}) # %add_130 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_162, 1e-10), kwargs = {}) # %sqrt_42 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_130,), kwargs = {}) # %div_42 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_80, %sqrt_42), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_49 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_49', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[2048, 32768], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_49', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_49(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 2048 rnumel = 18432 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): 
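        # Added note: first of two passes -- accumulate streaming Welford
        # partials (mean, m2, weight) across rnumel = 18432 elements per
        # filter; the second loop below re-reads the weights and writes the
        # standardized values using the mean and sqrt(var + 1e-10) found here.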
rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (18432*x0)), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 18432.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, None) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (18432*x0)), rmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (18432*x0)), tmp12, rmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/lc/clcxsrztfooasql5b6o2fhwxukcgbzvvpsfy34xamrihxhotepnv.py # Topologically Sorted Source Nodes: [group_norm_38], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # group_norm_38 => add_131, rsqrt_38, var_mean_81 # Graph fragment: # %var_mean_81 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_76, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_131 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_164, 1e-05), kwargs = {}) # %rsqrt_38 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_131,), kwargs = {}) triton_per_fused_native_group_norm_50 = async_compile.triton('triton_per_fused_native_group_norm_50', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[128, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_50', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_50(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): xnumel = 128 XBLOCK: tl.constexpr = 1 rnumel = 
256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r2 = rindex % 64 r3 = (rindex // 64) x0 = xindex % 32 x1 = (xindex // 32) x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + (64*x0) + (2048*r3) + (8192*x1)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + (x4), tmp18, None) tl.store(out_ptr0 + (x4), tmp8, None) tl.store(out_ptr1 + (x4), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/76/c76lb5dgw7uy4lpt3ebksnkexpk5a4mcifrw43rrzrmwnhrksd6u.py # Topologically Sorted Source Nodes: [group_norm_38, relu_38], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # group_norm_38 => add_132, mul_77 # relu_38 => relu_38 # Graph fragment: # %mul_77 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_77, %unsqueeze_233), kwargs = {}) # %add_132 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_77, %unsqueeze_230), kwargs = {}) # %relu_38 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_132,), kwargs = {}) triton_poi_fused_native_group_norm_relu_51 = async_compile.triton('triton_poi_fused_native_group_norm_relu_51', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_51', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_51(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = 
xindex % 2048 x2 = (xindex // 8192) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 256.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/a4/ca4jkajxllucjxjlawcrmgbqoc3gf3uabc23n74mijvijx6un6v4.py # Topologically Sorted Source Nodes: [var_mean_43, sub_43, add_55, sqrt_43, w_43], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_55 => add_133 # sqrt_43 => sqrt_43 # sub_43 => sub_82 # var_mean_43 => var_mean_82 # w_43 => div_43 # Graph fragment: # %var_mean_82 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_123, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_82 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_123, %getitem_167), kwargs = {}) # %add_133 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_166, 1e-10), kwargs = {}) # %sqrt_43 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_133,), kwargs = {}) # %div_43 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_82, %sqrt_43), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_52 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_52', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[8192, 2048], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_52', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_52(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 8192 rnumel = 2048 xoffset = tl.program_id(0) * 
XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, None) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (2048*x0)), tmp12, rmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/tr/ctrvbts2mcg36tlsxhn7jg7xen2u6a7kd7u4n3mosznsb6fa2wyo.py # Topologically Sorted Source Nodes: [input_16], Original ATen: [aten.add] # Source node to ATen node mapping: # input_16 => add_134 # Graph fragment: # %add_134 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_43, %convolution_40), kwargs = {}) triton_poi_fused_add_53 = async_compile.triton('triton_poi_fused_add_53', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_53', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_53(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], 
True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), None) tmp1 = tl.load(in_out_ptr0 + (x0), None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x0), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bm/cbmg74gizsfkitta6qalg37yo25qnqf63c5jyevmzhqmxnjx7xom.py # Topologically Sorted Source Nodes: [group_norm_39], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # group_norm_39 => add_135, rsqrt_39, var_mean_83 # Graph fragment: # %var_mean_83 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_78, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_135 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_168, 1e-05), kwargs = {}) # %rsqrt_39 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_135,), kwargs = {}) triton_per_fused_native_group_norm_54 = async_compile.triton('triton_per_fused_native_group_norm_54', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[128, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_54', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_54(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): xnumel = 128 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r2 = rindex % 256 r3 = (rindex // 256) x0 = xindex % 32 x1 = (xindex // 32) x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + (256*x0) + (8192*r3) + (32768*x1)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + (x4), tmp18, None) tl.store(out_ptr0 + (x4), tmp8, None) 
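    # Added note: the per-group mean (tmp8) and sum of squared deviations
    # (tmp13, stored just below) are kept alongside rsqrt(var + eps) (tmp18)
    # so the fused group-norm + ReLU pointwise kernel that follows can
    # re-normalize each element cheaply.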
tl.store(out_ptr1 + (x4), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/gz/cgzyjy3o2gqoit7amj557zg7toflqyj43kjgzl3ej62i7hk6lnee.py # Topologically Sorted Source Nodes: [group_norm_39, out_52], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # group_norm_39 => add_136, mul_79 # out_52 => relu_39 # Graph fragment: # %mul_79 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_79, %unsqueeze_239), kwargs = {}) # %add_136 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_79, %unsqueeze_236), kwargs = {}) # %relu_39 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_136,), kwargs = {}) triton_poi_fused_native_group_norm_relu_55 = async_compile.triton('triton_poi_fused_native_group_norm_relu_55', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_55', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_55(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 8192 x2 = (xindex // 32768) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 256)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 256)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1024.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/aj/cajj5ki6azp2kbx2jxbslibq6dzfyc3ipjv2qjovureewr6dmplt.py # Topologically Sorted Source Nodes: [var_mean_44, sub_44, add_57, sqrt_44, w_44], Original ATen: [aten.var_mean, aten.sub, 
aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_57 => add_137 # sqrt_44 => sqrt_44 # sub_44 => sub_84 # var_mean_44 => var_mean_84 # w_44 => div_44 # Graph fragment: # %var_mean_84 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_126, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_84 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_126, %getitem_171), kwargs = {}) # %add_137 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_170, 1e-10), kwargs = {}) # %sqrt_44 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_137,), kwargs = {}) # %div_44 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_84, %sqrt_44), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_56 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_56', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[2048, 8192], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_56', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_56(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 2048 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (8192*x0)), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, 
tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 8192.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, None) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (8192*x0)), rmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (8192*x0)), tmp12, rmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ff/cff7brpmobqi2vug2ta24kdr3loqrvuixh2kh2dnci64fj23ijsj.py # Topologically Sorted Source Nodes: [input_17], Original ATen: [aten.add] # Source node to ATen node mapping: # input_17 => add_144 # Graph fragment: # %add_144 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_46, %add_134), kwargs = {}) triton_poi_fused_add_57 = async_compile.triton('triton_poi_fused_add_57', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_57', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_57(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.load(in_ptr0 + (x0), None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x0), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/sz/cszq7gbtdxzwt3qzvghesh6nlz5v2y6sbgs46237zpvjscw4bbk4.py # Topologically Sorted Source Nodes: [input_20], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # input_20 => add_165, rsqrt_48, var_mean_101 # Graph fragment: # %var_mean_101 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_96, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_165 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_204, 1e-05), kwargs = {}) # %rsqrt_48 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_165,), kwargs = {}) triton_per_fused_native_group_norm_58 = 
async_compile.triton('triton_per_fused_native_group_norm_58', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[128, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_58', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_58(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel): xnumel = 128 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r2 = rindex % 256 r3 = (rindex // 256) x0 = xindex % 32 x1 = (xindex // 32) x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + (256*x0) + (8192*r3) + (32768*x1)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.debug_barrier() tl.store(in_out_ptr0 + (x4), tmp18, None) tl.store(out_ptr0 + (x4), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ho/chohdgeexcik3iyqkflbyj2cmb4wqtijhbp72tnfgmvhausg25tu.py # Topologically Sorted Source Nodes: [input_20, input_21, input_22], Original ATen: [aten.native_group_norm, aten.relu, aten.mean] # Source node to ATen node mapping: # input_20 => add_166, mul_97 # input_21 => relu_48 # input_22 => mean # Graph fragment: # %mul_97 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_97, %unsqueeze_293), kwargs = {}) # %add_166 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_97, %unsqueeze_290), kwargs = {}) # %relu_48 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%add_166,), kwargs = {}) # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%relu_48, [-1, -2], True), kwargs = {}) triton_poi_fused_mean_native_group_norm_relu_59 = 
async_compile.triton('triton_poi_fused_mean_native_group_norm_relu_59', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_native_group_norm_relu_59', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_native_group_norm_relu_59(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8192 x1 = (xindex // 8192) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (32768*x1)), None) tmp1 = tl.load(in_ptr1 + ((x2 // 256)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((x2 // 256)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (8192 + x0 + (32768*x1)), None) tmp18 = tl.load(in_ptr0 + (16384 + x0 + (32768*x1)), None) tmp25 = tl.load(in_ptr0 + (24576 + x0 + (32768*x1)), None) tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = tmp11 - tmp1 tmp13 = tmp12 * tmp3 tmp14 = tmp13 * tmp5 tmp15 = tmp14 + tmp7 tmp16 = triton_helpers.maximum(tmp9, tmp15) tmp17 = tmp10 + tmp16 tmp19 = tmp18 - tmp1 tmp20 = tmp19 * tmp3 tmp21 = tmp20 * tmp5 tmp22 = tmp21 + tmp7 tmp23 = triton_helpers.maximum(tmp9, tmp22) tmp24 = tmp17 + tmp23 tmp26 = tmp25 - tmp1 tmp27 = tmp26 * tmp3 tmp28 = tmp27 * tmp5 tmp29 = tmp28 + tmp7 tmp30 = triton_helpers.maximum(tmp9, tmp29) tmp31 = tmp24 + tmp30 tmp32 = 4.0 tmp33 = tmp31 / tmp32 tl.store(out_ptr0 + (x2), tmp33, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/sw/csw2vezjin4bvf6fhnzk3tefnt5ol3gapui4lrf37dd4xtvyd7fq.py # Topologically Sorted Source Nodes: [input_23], Original ATen: [aten.convolution] # Source node to ATen node mapping: # input_23 => convolution_53 # Graph fragment: # %convolution_53 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_153, %primals_154, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) 
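# --- Editorial reference (not part of the generated graph) -------------------
# The kernels around this point lower the network's classifier head: a
# GroupNorm (32 groups, eps=1e-05) + ReLU whose statistics come from kernel 58,
# a global average pool over the 2x2 spatial grid fused into kernel 59, and a
# 1x1 convolution onto 21843 logits (matching the ImageNet-21k label count used
# by BiT-style ResNets) whose bias add is kernel 60 below. A minimal eager-mode
# sketch of that head follows for orientation; the argument names are
# hypothetical and nothing in call() uses this function.
def _reference_head_sketch(x, group_norm, head_conv):
    """x: (N, 8192, 2, 2); group_norm: e.g. nn.GroupNorm(32, 8192, eps=1e-5);
    head_conv: e.g. nn.Conv2d(8192, 21843, kernel_size=1)."""
    import torch.nn.functional as F
    out = F.relu(group_norm(x))                  # fused into kernels 58/59
    out = out.mean(dim=(-1, -2), keepdim=True)   # global average pool (kernel 59)
    return head_conv(out)                        # conv + bias add (kernel 60)
# ------------------------------------------------------------------------------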
triton_poi_fused_convolution_60 = async_compile.triton('triton_poi_fused_convolution_60', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_60', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_60(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 87372 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 21843 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, 
primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154 = args args.clear() assert_size_stride(primals_1, (256, 3, 7, 7), (147, 49, 7, 1)) assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_3, (256, ), (1, )) assert_size_stride(primals_4, (256, ), (1, )) assert_size_stride(primals_5, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_6, (256, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_7, (256, ), (1, )) assert_size_stride(primals_8, (256, ), (1, )) assert_size_stride(primals_9, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_10, (256, ), (1, )) assert_size_stride(primals_11, (256, ), (1, )) assert_size_stride(primals_12, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_13, (1024, ), (1, )) assert_size_stride(primals_14, (1024, ), (1, )) assert_size_stride(primals_15, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_16, (256, ), (1, )) assert_size_stride(primals_17, (256, ), (1, )) assert_size_stride(primals_18, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_19, (256, ), (1, )) assert_size_stride(primals_20, (256, ), (1, )) assert_size_stride(primals_21, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_22, (1024, ), (1, )) assert_size_stride(primals_23, (1024, ), (1, )) assert_size_stride(primals_24, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_25, (256, ), (1, )) assert_size_stride(primals_26, (256, ), (1, )) assert_size_stride(primals_27, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_28, (256, ), (1, )) assert_size_stride(primals_29, (256, ), (1, )) assert_size_stride(primals_30, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_31, (1024, ), (1, )) assert_size_stride(primals_32, (1024, ), (1, )) assert_size_stride(primals_33, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_34, (256, ), (1, )) assert_size_stride(primals_35, (256, ), (1, )) assert_size_stride(primals_36, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_37, (256, ), (1, )) assert_size_stride(primals_38, (256, ), (1, )) assert_size_stride(primals_39, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_40, (1024, ), (1, )) assert_size_stride(primals_41, (1024, ), (1, )) assert_size_stride(primals_42, (2048, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_43, (512, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_44, (512, ), (1, )) assert_size_stride(primals_45, (512, ), (1, )) assert_size_stride(primals_46, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_47, (512, ), (1, )) assert_size_stride(primals_48, (512, ), (1, )) assert_size_stride(primals_49, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_50, (2048, ), (1, )) assert_size_stride(primals_51, (2048, ), (1, )) assert_size_stride(primals_52, (512, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_53, (512, ), (1, )) assert_size_stride(primals_54, (512, ), (1, )) assert_size_stride(primals_55, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_56, (512, ), (1, )) assert_size_stride(primals_57, (512, ), (1, )) assert_size_stride(primals_58, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_59, 
(2048, ), (1, )) assert_size_stride(primals_60, (2048, ), (1, )) assert_size_stride(primals_61, (512, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_62, (512, ), (1, )) assert_size_stride(primals_63, (512, ), (1, )) assert_size_stride(primals_64, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_65, (512, ), (1, )) assert_size_stride(primals_66, (512, ), (1, )) assert_size_stride(primals_67, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_68, (2048, ), (1, )) assert_size_stride(primals_69, (2048, ), (1, )) assert_size_stride(primals_70, (512, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_71, (512, ), (1, )) assert_size_stride(primals_72, (512, ), (1, )) assert_size_stride(primals_73, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_74, (512, ), (1, )) assert_size_stride(primals_75, (512, ), (1, )) assert_size_stride(primals_76, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_77, (2048, ), (1, )) assert_size_stride(primals_78, (2048, ), (1, )) assert_size_stride(primals_79, (4096, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_80, (1024, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_81, (1024, ), (1, )) assert_size_stride(primals_82, (1024, ), (1, )) assert_size_stride(primals_83, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_84, (1024, ), (1, )) assert_size_stride(primals_85, (1024, ), (1, )) assert_size_stride(primals_86, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_87, (4096, ), (1, )) assert_size_stride(primals_88, (4096, ), (1, )) assert_size_stride(primals_89, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_90, (1024, ), (1, )) assert_size_stride(primals_91, (1024, ), (1, )) assert_size_stride(primals_92, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_93, (1024, ), (1, )) assert_size_stride(primals_94, (1024, ), (1, )) assert_size_stride(primals_95, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_96, (4096, ), (1, )) assert_size_stride(primals_97, (4096, ), (1, )) assert_size_stride(primals_98, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_99, (1024, ), (1, )) assert_size_stride(primals_100, (1024, ), (1, )) assert_size_stride(primals_101, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_102, (1024, ), (1, )) assert_size_stride(primals_103, (1024, ), (1, )) assert_size_stride(primals_104, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_105, (4096, ), (1, )) assert_size_stride(primals_106, (4096, ), (1, )) assert_size_stride(primals_107, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_108, (1024, ), (1, )) assert_size_stride(primals_109, (1024, ), (1, )) assert_size_stride(primals_110, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_111, (1024, ), (1, )) assert_size_stride(primals_112, (1024, ), (1, )) assert_size_stride(primals_113, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_114, (4096, ), (1, )) assert_size_stride(primals_115, (4096, ), (1, )) assert_size_stride(primals_116, (8192, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_117, (2048, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_118, (2048, ), (1, )) assert_size_stride(primals_119, (2048, ), (1, )) assert_size_stride(primals_120, (2048, 2048, 3, 3), (18432, 9, 3, 1)) assert_size_stride(primals_121, (2048, ), (1, )) assert_size_stride(primals_122, (2048, ), (1, )) assert_size_stride(primals_123, (8192, 2048, 1, 1), (2048, 1, 1, 
1)) assert_size_stride(primals_124, (8192, ), (1, )) assert_size_stride(primals_125, (8192, ), (1, )) assert_size_stride(primals_126, (2048, 8192, 1, 1), (8192, 1, 1, 1)) assert_size_stride(primals_127, (2048, ), (1, )) assert_size_stride(primals_128, (2048, ), (1, )) assert_size_stride(primals_129, (2048, 2048, 3, 3), (18432, 9, 3, 1)) assert_size_stride(primals_130, (2048, ), (1, )) assert_size_stride(primals_131, (2048, ), (1, )) assert_size_stride(primals_132, (8192, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_133, (8192, ), (1, )) assert_size_stride(primals_134, (8192, ), (1, )) assert_size_stride(primals_135, (2048, 8192, 1, 1), (8192, 1, 1, 1)) assert_size_stride(primals_136, (2048, ), (1, )) assert_size_stride(primals_137, (2048, ), (1, )) assert_size_stride(primals_138, (2048, 2048, 3, 3), (18432, 9, 3, 1)) assert_size_stride(primals_139, (2048, ), (1, )) assert_size_stride(primals_140, (2048, ), (1, )) assert_size_stride(primals_141, (8192, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_142, (8192, ), (1, )) assert_size_stride(primals_143, (8192, ), (1, )) assert_size_stride(primals_144, (2048, 8192, 1, 1), (8192, 1, 1, 1)) assert_size_stride(primals_145, (2048, ), (1, )) assert_size_stride(primals_146, (2048, ), (1, )) assert_size_stride(primals_147, (2048, 2048, 3, 3), (18432, 9, 3, 1)) assert_size_stride(primals_148, (2048, ), (1, )) assert_size_stride(primals_149, (2048, ), (1, )) assert_size_stride(primals_150, (8192, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_151, (8192, ), (1, )) assert_size_stride(primals_152, (8192, ), (1, )) assert_size_stride(primals_153, (21843, 8192, 1, 1), (8192, 1, 1, 1)) assert_size_stride(primals_154, (21843, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 768, 49, grid=grid(768, 49), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_2, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0) del primals_2 buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_9, buf2, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_9 buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_18, buf3, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_18 buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_27, buf4, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_27 buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_36, buf5, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_36 buf6 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_46, buf6, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_46 buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] 
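        # Editorial note: the triton_poi_fused_0.._5 copies in this prologue
        # repack each conv weight from contiguous (C_out, C_in, kH, kW) strides
        # into channels-last strides (e.g. (4608, 1, 1536, 512) for the
        # 512x512x3x3 filters), so the extern_kernels.convolution calls below
        # consume channels-last operands without further transposes.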
triton_poi_fused_3.run(primals_55, buf7, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_55 buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_64, buf8, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_64 buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_73, buf9, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_73 buf10 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_83, buf10, 1048576, 9, grid=grid(1048576, 9), stream=stream0) del primals_83 buf11 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_92, buf11, 1048576, 9, grid=grid(1048576, 9), stream=stream0) del primals_92 buf12 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_101, buf12, 1048576, 9, grid=grid(1048576, 9), stream=stream0) del primals_101 buf13 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_110, buf13, 1048576, 9, grid=grid(1048576, 9), stream=stream0) del primals_110 buf14 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_5.run(primals_120, buf14, 4194304, 9, grid=grid(4194304, 9), stream=stream0) del primals_120 buf15 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_5.run(primals_129, buf15, 4194304, 9, grid=grid(4194304, 9), stream=stream0) del primals_129 buf16 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_5.run(primals_138, buf16, 4194304, 9, grid=grid(4194304, 9), stream=stream0) del primals_138 buf17 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_5.run(primals_147, buf17, 4194304, 9, grid=grid(4194304, 9), stream=stream0) del primals_147 buf19 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf21 = reinterpret_tensor(buf19, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf19 # reuse buf22 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.float32) # Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, w], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_6.run(buf21, buf0, buf22, 256, 147, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] buf23 = extern_kernels.convolution(buf1, buf22, stride=(2, 2), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 256, 32, 32), (262144, 1, 8192, 256)) buf24 = empty_strided_cuda((4, 256, 34, 34), (295936, 1, 8704, 256), torch.float32) # Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_7.run(buf23, buf24, 1183744, 
grid=grid(1183744), stream=stream0) buf25 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) buf26 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.int8) # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_8.run(buf24, buf25, buf26, 262144, grid=grid(262144), stream=stream0) buf27 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf28 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf30 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_9.run(buf25, buf27, buf28, buf30, 128, 2048, grid=grid(128), stream=stream0) buf31 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm, out], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_10.run(buf25, buf27, buf28, primals_3, primals_4, buf31, 262144, grid=grid(262144), stream=stream0) del primals_4 buf33 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf35 = reinterpret_tensor(buf33, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf33 # reuse buf36 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_1, sub_1, add_1, sqrt_1, w_1], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_11.run(buf35, primals_5, buf36, 1024, 256, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [residual], Original ATen: [aten.convolution] buf37 = extern_kernels.convolution(buf31, buf36, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf37, (4, 1024, 16, 16), (262144, 1, 16384, 1024)) buf39 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf41 = reinterpret_tensor(buf39, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf39 # reuse buf42 = empty_strided_cuda((256, 256, 1, 1), (256, 1, 256, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_2, sub_2, add_2, sqrt_2, w_2], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_12.run(buf41, primals_6, buf42, 256, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf43 = extern_kernels.convolution(buf31, buf42, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf43, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf44 = buf28; del buf28 # reuse buf45 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf47 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_1], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_9.run(buf43, buf44, buf45, buf47, 128, 2048, grid=grid(128), stream=stream0) buf48 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_1, relu_1], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_10.run(buf43, buf44, buf45, primals_7, primals_8, buf48, 262144, grid=grid(262144), stream=stream0) 
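        # Editorial note: from here on, every bottleneck repeats one template:
        # standardize the conv weight, w_hat = (w - mean(w)) / sqrt(var(w) + 1e-10)
        # over dims (1, 2, 3), via the *_var_mean_* reduction kernels (Welford
        # accumulation, cf. triton_red_fused_add_div_sqrt_sub_var_mean_56), run
        # the convolution through extern_kernels.convolution, then apply
        # GroupNorm (32 groups, eps=1e-05) fused with ReLU. Scratch buffers
        # (buf45, buf56, ...) are rotated via "del ... # reuse" so peak memory
        # stays flat across blocks.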
del primals_8 buf50 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf52 = reinterpret_tensor(buf50, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf50 # reuse buf53 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_3, sub_3, add_3, sqrt_3, w_3], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_13.run(buf52, buf2, buf53, 256, 2304, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution] buf54 = extern_kernels.convolution(buf48, buf53, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf54, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf55 = buf45; del buf45 # reuse buf56 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf58 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_2], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_9.run(buf54, buf55, buf56, buf58, 128, 2048, grid=grid(128), stream=stream0) buf59 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_2, relu_2], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_10.run(buf54, buf55, buf56, primals_10, primals_11, buf59, 262144, grid=grid(262144), stream=stream0) del primals_11 buf61 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf63 = reinterpret_tensor(buf61, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf61 # reuse buf64 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_4, sub_4, add_4, sqrt_4, w_4], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_11.run(buf63, primals_12, buf64, 1024, 256, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution] buf65 = extern_kernels.convolution(buf59, buf64, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf65, (4, 1024, 16, 16), (262144, 1, 16384, 1024)) buf66 = buf37; del buf37 # reuse # Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.add] triton_poi_fused_add_14.run(buf66, buf65, 1048576, grid=grid(1048576), stream=stream0) buf67 = buf56; del buf56 # reuse buf68 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf70 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_3], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_15.run(buf66, buf67, buf68, buf70, 128, 8192, grid=grid(128), stream=stream0) buf71 = buf65; del buf65 # reuse # Topologically Sorted Source Nodes: [group_norm_3, out_4], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_16.run(buf66, buf67, buf68, primals_13, primals_14, buf71, 1048576, grid=grid(1048576), stream=stream0) del primals_14 buf73 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf75 = reinterpret_tensor(buf73, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf73 # reuse buf76 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) # Topologically 
Sorted Source Nodes: [var_mean_5, sub_5, add_6, sqrt_5, w_5], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_17.run(buf75, primals_15, buf76, 256, 1024, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.convolution] buf77 = extern_kernels.convolution(buf71, buf76, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf77, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf78 = buf68; del buf68 # reuse buf79 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf81 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_4], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_9.run(buf77, buf78, buf79, buf81, 128, 2048, grid=grid(128), stream=stream0) buf82 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_4, relu_4], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_10.run(buf77, buf78, buf79, primals_16, primals_17, buf82, 262144, grid=grid(262144), stream=stream0) del primals_17 buf84 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf86 = reinterpret_tensor(buf84, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf84 # reuse buf87 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_6, sub_6, add_7, sqrt_6, w_6], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_13.run(buf86, buf3, buf87, 256, 2304, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.convolution] buf88 = extern_kernels.convolution(buf82, buf87, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf88, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf89 = buf79; del buf79 # reuse buf90 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf92 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_5], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_9.run(buf88, buf89, buf90, buf92, 128, 2048, grid=grid(128), stream=stream0) buf93 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_5, relu_5], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_10.run(buf88, buf89, buf90, primals_19, primals_20, buf93, 262144, grid=grid(262144), stream=stream0) del primals_20 buf95 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf97 = reinterpret_tensor(buf95, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf95 # reuse buf98 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_7, sub_7, add_8, sqrt_7, w_7], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_11.run(buf97, primals_21, buf98, 1024, 256, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.convolution] buf99 = extern_kernels.convolution(buf93, buf98, stride=(1, 1), padding=(0, 0), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf99, (4, 1024, 16, 16), (262144, 1, 16384, 1024)) buf100 = buf99; del buf99 # reuse # Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.add] triton_poi_fused_add_18.run(buf100, buf66, 1048576, grid=grid(1048576), stream=stream0) buf101 = buf90; del buf90 # reuse buf102 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf104 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_6], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_15.run(buf100, buf101, buf102, buf104, 128, 8192, grid=grid(128), stream=stream0) buf105 = reinterpret_tensor(buf23, (4, 1024, 16, 16), (262144, 1, 16384, 1024), 0); del buf23 # reuse # Topologically Sorted Source Nodes: [group_norm_6, out_8], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_16.run(buf100, buf101, buf102, primals_22, primals_23, buf105, 1048576, grid=grid(1048576), stream=stream0) del primals_23 buf107 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf109 = reinterpret_tensor(buf107, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf107 # reuse buf110 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_8, sub_8, add_10, sqrt_8, w_8], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_17.run(buf109, primals_24, buf110, 256, 1024, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.convolution] buf111 = extern_kernels.convolution(buf105, buf110, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf111, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf112 = buf102; del buf102 # reuse buf113 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf115 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_7], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_9.run(buf111, buf112, buf113, buf115, 128, 2048, grid=grid(128), stream=stream0) buf116 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_7, relu_7], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_10.run(buf111, buf112, buf113, primals_25, primals_26, buf116, 262144, grid=grid(262144), stream=stream0) del primals_26 buf118 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf120 = reinterpret_tensor(buf118, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf118 # reuse buf121 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_9, sub_9, add_11, sqrt_9, w_9], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_13.run(buf120, buf4, buf121, 256, 2304, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [out_10], Original ATen: [aten.convolution] buf122 = extern_kernels.convolution(buf116, buf121, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf122, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf123 = buf113; del buf113 # 
reuse buf124 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf126 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_8], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_9.run(buf122, buf123, buf124, buf126, 128, 2048, grid=grid(128), stream=stream0) buf127 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_8, relu_8], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_10.run(buf122, buf123, buf124, primals_28, primals_29, buf127, 262144, grid=grid(262144), stream=stream0) del primals_29 buf129 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf131 = reinterpret_tensor(buf129, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf129 # reuse buf132 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_10, sub_10, add_12, sqrt_10, w_10], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_11.run(buf131, primals_30, buf132, 1024, 256, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [out_11], Original ATen: [aten.convolution] buf133 = extern_kernels.convolution(buf127, buf132, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf133, (4, 1024, 16, 16), (262144, 1, 16384, 1024)) buf134 = buf133; del buf133 # reuse # Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.add] triton_poi_fused_add_18.run(buf134, buf100, 1048576, grid=grid(1048576), stream=stream0) buf135 = buf124; del buf124 # reuse buf136 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf138 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_9], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_15.run(buf134, buf135, buf136, buf138, 128, 8192, grid=grid(128), stream=stream0) buf139 = empty_strided_cuda((4, 1024, 16, 16), (262144, 1, 16384, 1024), torch.float32) # Topologically Sorted Source Nodes: [group_norm_9, out_12], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_16.run(buf134, buf135, buf136, primals_31, primals_32, buf139, 1048576, grid=grid(1048576), stream=stream0) del primals_32 buf141 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf143 = reinterpret_tensor(buf141, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf141 # reuse buf144 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_11, sub_11, add_14, sqrt_11, w_11], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_17.run(buf143, primals_33, buf144, 256, 1024, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [out_13], Original ATen: [aten.convolution] buf145 = extern_kernels.convolution(buf139, buf144, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf145, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf146 = buf136; del buf136 # reuse buf147 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf149 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), 
torch.float32) # Topologically Sorted Source Nodes: [group_norm_10], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_9.run(buf145, buf146, buf147, buf149, 128, 2048, grid=grid(128), stream=stream0) buf150 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_10, relu_10], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_10.run(buf145, buf146, buf147, primals_34, primals_35, buf150, 262144, grid=grid(262144), stream=stream0) del primals_35 buf152 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf154 = reinterpret_tensor(buf152, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf152 # reuse buf155 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_12, sub_12, add_15, sqrt_12, w_12], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_13.run(buf154, buf5, buf155, 256, 2304, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [out_14], Original ATen: [aten.convolution] buf156 = extern_kernels.convolution(buf150, buf155, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf156, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf157 = buf147; del buf147 # reuse buf158 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf160 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_11], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_9.run(buf156, buf157, buf158, buf160, 128, 2048, grid=grid(128), stream=stream0) buf161 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_11, relu_11], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_10.run(buf156, buf157, buf158, primals_37, primals_38, buf161, 262144, grid=grid(262144), stream=stream0) del primals_38 buf163 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf165 = reinterpret_tensor(buf163, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf163 # reuse buf166 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_13, sub_13, add_16, sqrt_13, w_13], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_11.run(buf165, primals_39, buf166, 1024, 256, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [out_15], Original ATen: [aten.convolution] buf167 = extern_kernels.convolution(buf161, buf166, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf167, (4, 1024, 16, 16), (262144, 1, 16384, 1024)) buf168 = buf167; del buf167 # reuse # Topologically Sorted Source Nodes: [input_7], Original ATen: [aten.add] triton_poi_fused_add_18.run(buf168, buf134, 1048576, grid=grid(1048576), stream=stream0) buf169 = buf158; del buf158 # reuse buf170 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf172 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_12], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_15.run(buf168, 
buf169, buf170, buf172, 128, 8192, grid=grid(128), stream=stream0) buf173 = empty_strided_cuda((4, 1024, 16, 16), (262144, 1, 16384, 1024), torch.float32) # Topologically Sorted Source Nodes: [group_norm_12, out_16], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_16.run(buf168, buf169, buf170, primals_40, primals_41, buf173, 1048576, grid=grid(1048576), stream=stream0) del primals_41 buf175 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf177 = reinterpret_tensor(buf175, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf175 # reuse buf178 = empty_strided_cuda((2048, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_14, sub_14, add_18, sqrt_14, w_14], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_19.run(buf177, primals_42, buf178, 2048, 1024, grid=grid(2048), stream=stream0) # Topologically Sorted Source Nodes: [residual_1], Original ATen: [aten.convolution] buf179 = extern_kernels.convolution(buf173, buf178, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf179, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf181 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf183 = reinterpret_tensor(buf181, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf181 # reuse buf184 = empty_strided_cuda((512, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_15, sub_15, add_19, sqrt_15, w_15], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_20.run(buf183, primals_43, buf184, 512, 1024, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [out_17], Original ATen: [aten.convolution] buf185 = extern_kernels.convolution(buf173, buf184, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf185, (4, 512, 16, 16), (131072, 1, 8192, 512)) buf186 = buf170; del buf170 # reuse buf187 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf189 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_13], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_21.run(buf185, buf186, buf187, buf189, 128, 4096, grid=grid(128), stream=stream0) buf190 = empty_strided_cuda((4, 512, 16, 16), (131072, 1, 8192, 512), torch.float32) # Topologically Sorted Source Nodes: [group_norm_13, relu_13], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_22.run(buf185, buf186, buf187, primals_44, primals_45, buf190, 524288, grid=grid(524288), stream=stream0) del primals_45 buf192 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf194 = reinterpret_tensor(buf192, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf192 # reuse buf195 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Topologically Sorted Source Nodes: [var_mean_16, sub_16, add_20, sqrt_16, w_16], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_23.run(buf194, buf6, buf195, 512, 4608, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [out_18], Original ATen: [aten.convolution] buf196 = extern_kernels.convolution(buf190, buf195, stride=(2, 
2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf196, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf197 = buf187; del buf187 # reuse buf198 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf200 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_14], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_24.run(buf196, buf197, buf198, buf200, 128, 1024, grid=grid(128), stream=stream0) buf201 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) # Topologically Sorted Source Nodes: [group_norm_14, relu_14], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_25.run(buf196, buf197, buf198, primals_47, primals_48, buf201, 131072, grid=grid(131072), stream=stream0) del primals_48 buf203 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf205 = reinterpret_tensor(buf203, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf203 # reuse buf206 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) # Topologically Sorted Source Nodes: [var_mean_17, sub_17, add_21, sqrt_17, w_17], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_26.run(buf205, primals_49, buf206, 2048, 512, grid=grid(2048), stream=stream0) # Topologically Sorted Source Nodes: [out_19], Original ATen: [aten.convolution] buf207 = extern_kernels.convolution(buf201, buf206, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf207, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf208 = buf179; del buf179 # reuse # Topologically Sorted Source Nodes: [input_8], Original ATen: [aten.add] triton_poi_fused_add_27.run(buf208, buf207, 524288, grid=grid(524288), stream=stream0) buf209 = buf198; del buf198 # reuse buf210 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf212 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_15], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_28.run(buf208, buf209, buf210, buf212, 128, 4096, grid=grid(128), stream=stream0) buf213 = buf207; del buf207 # reuse # Topologically Sorted Source Nodes: [group_norm_15, out_20], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_29.run(buf208, buf209, buf210, primals_50, primals_51, buf213, 524288, grid=grid(524288), stream=stream0) del primals_51 buf215 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf217 = reinterpret_tensor(buf215, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf215 # reuse buf218 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) # Topologically Sorted Source Nodes: [var_mean_18, sub_18, add_23, sqrt_18, w_18], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_30.run(buf217, primals_52, buf218, 512, 2048, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [out_21], Original ATen: [aten.convolution] buf219 = extern_kernels.convolution(buf213, buf218, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf219, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf220 = buf210; del buf210 # reuse 
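        # Editorial note: this stage keeps the same pre-activation layout as the
        # previous one, but its first block downsamples: the residual projection
        # (buf179, 1x1 conv, stride 2) and the 3x3 conv (buf196, stride 2,
        # padding 1) both halve the 16x16 maps to 8x8, and the residual joins
        # (triton_poi_fused_add_27/_31) then accumulate the skip branch in place
        # (in_out_ptr0) rather than allocating a separate sum tensor.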
        buf221 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf223 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_16], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_24.run(buf219, buf220, buf221, buf223, 128, 1024, grid=grid(128), stream=stream0)
        buf224 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_16, relu_16], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_25.run(buf219, buf220, buf221, primals_53, primals_54, buf224, 131072, grid=grid(131072), stream=stream0)
        del primals_54
        buf226 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32)
        buf228 = reinterpret_tensor(buf226, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf226  # reuse
        buf229 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_19, sub_19, add_24, sqrt_19, w_19], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_23.run(buf228, buf7, buf229, 512, 4608, grid=grid(512), stream=stream0)
        # Topologically Sorted Source Nodes: [out_22], Original ATen: [aten.convolution]
        buf230 = extern_kernels.convolution(buf224, buf229, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf230, (4, 512, 8, 8), (32768, 1, 4096, 512))
        buf231 = buf221; del buf221  # reuse
        buf232 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf234 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_17], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_24.run(buf230, buf231, buf232, buf234, 128, 1024, grid=grid(128), stream=stream0)
        buf235 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_17, relu_17], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_25.run(buf230, buf231, buf232, primals_56, primals_57, buf235, 131072, grid=grid(131072), stream=stream0)
        del primals_57
        buf237 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
        buf239 = reinterpret_tensor(buf237, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf237  # reuse
        buf240 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_20, sub_20, add_25, sqrt_20, w_20], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_per_fused_add_div_sqrt_sub_var_mean_26.run(buf239, primals_58, buf240, 2048, 512, grid=grid(2048), stream=stream0)
        # Topologically Sorted Source Nodes: [out_23], Original ATen: [aten.convolution]
        buf241 = extern_kernels.convolution(buf235, buf240, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf241, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
        buf242 = buf241; del buf241  # reuse
        # Topologically Sorted Source Nodes: [input_9], Original ATen: [aten.add]
        triton_poi_fused_add_31.run(buf242, buf208, 524288, grid=grid(524288), stream=stream0)
        buf243 = buf232; del buf232  # reuse
        buf244 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf246 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_18], Original ATen: [aten.native_group_norm]
        triton_red_fused_native_group_norm_28.run(buf242, buf243, buf244, buf246, 128, 4096, grid=grid(128), stream=stream0)
        buf247 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_18, out_24], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_29.run(buf242, buf243, buf244, primals_59, primals_60, buf247, 524288, grid=grid(524288), stream=stream0)
        del primals_60
        buf249 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32)
        buf251 = reinterpret_tensor(buf249, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf249  # reuse
        buf252 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_21, sub_21, add_27, sqrt_21, w_21], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_30.run(buf251, primals_61, buf252, 512, 2048, grid=grid(512), stream=stream0)
        # Topologically Sorted Source Nodes: [out_25], Original ATen: [aten.convolution]
        buf253 = extern_kernels.convolution(buf247, buf252, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf253, (4, 512, 8, 8), (32768, 1, 4096, 512))
        buf254 = buf244; del buf244  # reuse
        buf255 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf257 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_19], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_24.run(buf253, buf254, buf255, buf257, 128, 1024, grid=grid(128), stream=stream0)
        buf258 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_19, relu_19], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_25.run(buf253, buf254, buf255, primals_62, primals_63, buf258, 131072, grid=grid(131072), stream=stream0)
        del primals_63
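        # --- Editor's note: illustrative sketch, not part of the generated graph. ---
        # Each repeated stretch of this function is one pre-activation bottleneck:
        # GroupNorm and ReLU run before each convolution (1x1 reduce, 3x3, 1x1 expand),
        # and the result is added back onto the block input by a triton_poi_fused_add_*
        # launch. In eager PyTorch the structure is roughly the following, where the
        # gn*/conv* callables stand in for the standardized weights and GN affines used here:
        def _preact_bottleneck_sketch(x, gn1, conv1, gn2, conv2, gn3, conv3):
            out = conv1(torch.relu(gn1(x)))    # 1x1, channel reduction
            out = conv2(torch.relu(gn2(out)))  # 3x3, spatial mixing
            out = conv3(torch.relu(gn3(out)))  # 1x1, channel expansion
            return x + out                     # residual connection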
        buf260 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32)
        buf262 = reinterpret_tensor(buf260, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf260  # reuse
        buf263 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_22, sub_22, add_28, sqrt_22, w_22], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_23.run(buf262, buf8, buf263, 512, 4608, grid=grid(512), stream=stream0)
        # Topologically Sorted Source Nodes: [out_26], Original ATen: [aten.convolution]
        buf264 = extern_kernels.convolution(buf258, buf263, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf264, (4, 512, 8, 8), (32768, 1, 4096, 512))
        buf265 = buf255; del buf255  # reuse
        buf266 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf268 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_20], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_24.run(buf264, buf265, buf266, buf268, 128, 1024, grid=grid(128), stream=stream0)
        buf269 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_20, relu_20], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_25.run(buf264, buf265, buf266, primals_65, primals_66, buf269, 131072, grid=grid(131072), stream=stream0)
        del primals_66
        buf271 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
        buf273 = reinterpret_tensor(buf271, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf271  # reuse
        buf274 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_23, sub_23, add_29, sqrt_23, w_23], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_per_fused_add_div_sqrt_sub_var_mean_26.run(buf273, primals_67, buf274, 2048, 512, grid=grid(2048), stream=stream0)
        # Topologically Sorted Source Nodes: [out_27], Original ATen: [aten.convolution]
        buf275 = extern_kernels.convolution(buf269, buf274, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf275, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
        buf276 = buf275; del buf275  # reuse
        # Topologically Sorted Source Nodes: [input_10], Original ATen: [aten.add]
        triton_poi_fused_add_31.run(buf276, buf242, 524288, grid=grid(524288), stream=stream0)
        buf277 = buf266; del buf266  # reuse
        buf278 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf280 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_21], Original ATen: [aten.native_group_norm]
        triton_red_fused_native_group_norm_28.run(buf276, buf277, buf278, buf280, 128, 4096, grid=grid(128), stream=stream0)
        buf281 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_21, out_28], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_29.run(buf276, buf277, buf278, primals_68, primals_69, buf281, 524288, grid=grid(524288), stream=stream0)
        del primals_69
        buf283 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32)
        buf285 = reinterpret_tensor(buf283, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf283  # reuse
        buf286 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_24, sub_24, add_31, sqrt_24, w_24], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_30.run(buf285, primals_70, buf286, 512, 2048, grid=grid(512), stream=stream0)
        # Topologically Sorted Source Nodes: [out_29], Original ATen: [aten.convolution]
        buf287 = extern_kernels.convolution(buf281, buf286, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf287, (4, 512, 8, 8), (32768, 1, 4096, 512))
        buf288 = buf278; del buf278  # reuse
        buf289 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf291 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_22], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_24.run(buf287, buf288, buf289, buf291, 128, 1024, grid=grid(128), stream=stream0)
        buf292 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_22, relu_22], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_25.run(buf287, buf288, buf289, primals_71, primals_72, buf292, 131072, grid=grid(131072), stream=stream0)
        del primals_72
        buf294 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32)
        buf296 = reinterpret_tensor(buf294, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf294  # reuse
        buf297 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_25, sub_25, add_32, sqrt_25, w_25], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_23.run(buf296, buf9, buf297, 512, 4608, grid=grid(512), stream=stream0)
        # Topologically Sorted Source Nodes: [out_30], Original ATen: [aten.convolution]
        buf298 = extern_kernels.convolution(buf292, buf297, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf298, (4, 512, 8, 8), (32768, 1, 4096, 512))
        buf299 = buf289; del buf289  # reuse
        buf300 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf302 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_23], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_24.run(buf298, buf299, buf300, buf302, 128, 1024, grid=grid(128), stream=stream0)
        buf303 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_23, relu_23], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_25.run(buf298, buf299, buf300, primals_74, primals_75, buf303, 131072, grid=grid(131072), stream=stream0)
        del primals_75
        buf305 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
        buf307 = reinterpret_tensor(buf305, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf305  # reuse
        buf308 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_26, sub_26, add_33, sqrt_26, w_26], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_per_fused_add_div_sqrt_sub_var_mean_26.run(buf307, primals_76, buf308, 2048, 512, grid=grid(2048), stream=stream0)
        # Topologically Sorted Source Nodes: [out_31], Original ATen: [aten.convolution]
        buf309 = extern_kernels.convolution(buf303, buf308, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf309, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
        buf310 = buf309; del buf309  # reuse
        # Topologically Sorted Source Nodes: [input_11], Original ATen: [aten.add]
        triton_poi_fused_add_31.run(buf310, buf276, 524288, grid=grid(524288), stream=stream0)
        buf311 = buf300; del buf300  # reuse
        buf312 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf314 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_24], Original ATen: [aten.native_group_norm]
        triton_red_fused_native_group_norm_28.run(buf310, buf311, buf312, buf314, 128, 4096, grid=grid(128), stream=stream0)
        buf315 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_24, out_32], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_29.run(buf310, buf311, buf312, primals_77, primals_78, buf315, 524288, grid=grid(524288), stream=stream0)
        del primals_78
        buf317 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32)
        buf319 = reinterpret_tensor(buf317, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf317  # reuse
        buf320 = empty_strided_cuda((4096, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_27, sub_27, add_35, sqrt_27, w_27], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_32.run(buf319, primals_79, buf320, 4096, 2048, grid=grid(4096), stream=stream0)
        # Topologically Sorted Source Nodes: [residual_2], Original ATen: [aten.convolution]
        buf321 = extern_kernels.convolution(buf315, buf320, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf321, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
        buf323 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
        buf325 = reinterpret_tensor(buf323, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf323  # reuse
        buf326 = empty_strided_cuda((1024, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_28, sub_28, add_36, sqrt_28, w_28], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_33.run(buf325, primals_80, buf326, 1024, 2048, grid=grid(1024), stream=stream0)
        # Topologically Sorted Source Nodes: [out_33], Original ATen: [aten.convolution]
        buf327 = extern_kernels.convolution(buf315, buf326, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf327, (4, 1024, 8, 8), (65536, 1, 8192, 1024))
        buf328 = buf312; del buf312  # reuse
        buf329 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf331 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_25], Original ATen: [aten.native_group_norm]
        triton_red_fused_native_group_norm_34.run(buf327, buf328, buf329, buf331, 128, 2048, grid=grid(128), stream=stream0)
        buf332 = empty_strided_cuda((4, 1024, 8, 8), (65536, 1, 8192, 1024), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_25, relu_25], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_35.run(buf327, buf328, buf329, primals_81, primals_82, buf332, 262144, grid=grid(262144), stream=stream0)
        del primals_82
        buf334 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
        buf336 = reinterpret_tensor(buf334, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf334  # reuse
        buf337 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_29, sub_29, add_37, sqrt_29, w_29], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_36.run(buf336, buf10, buf337, 1024, 9216, grid=grid(1024), stream=stream0)
        # Topologically Sorted Source Nodes: [out_34], Original ATen: [aten.convolution]
        buf338 = extern_kernels.convolution(buf332, buf337, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf338, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
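        # --- Editor's note: illustrative sketch, not part of the generated graph. ---
        # buf321 ([residual_2]) and buf338 ([out_34]) mark a stage boundary: the main
        # path downsamples with a stride-2 3x3 convolution and widens the channels, so
        # the identity cannot be added directly. The shortcut is therefore a stride-2
        # 1x1 projection convolution, the usual ResNet recipe. Schematically:
        def _projection_shortcut_sketch(x, proj_conv, main_path):
            # proj_conv: 1x1 conv, stride 2, C_in -> C_out; main_path downsamples the
            # same way internally, so the two summands have matching shapes
            return proj_conv(x) + main_path(x)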
        buf339 = buf329; del buf329  # reuse
        buf340 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf342 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_26], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_37.run(buf338, buf339, buf340, buf342, 128, 512, grid=grid(128), stream=stream0)
        buf343 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_26, relu_26], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_38.run(buf338, buf339, buf340, primals_84, primals_85, buf343, 65536, grid=grid(65536), stream=stream0)
        del primals_85
        buf345 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32)
        buf347 = reinterpret_tensor(buf345, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf345  # reuse
        buf348 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_30, sub_30, add_38, sqrt_30, w_30], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_per_fused_add_div_sqrt_sub_var_mean_39.run(buf347, primals_86, buf348, 4096, 1024, grid=grid(4096), stream=stream0)
        # Topologically Sorted Source Nodes: [out_35], Original ATen: [aten.convolution]
        buf349 = extern_kernels.convolution(buf343, buf348, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf349, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
        buf350 = buf321; del buf321  # reuse
        # Topologically Sorted Source Nodes: [input_12], Original ATen: [aten.add]
        triton_poi_fused_add_40.run(buf350, buf349, 262144, grid=grid(262144), stream=stream0)
        buf351 = buf340; del buf340  # reuse
        buf352 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf354 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_27], Original ATen: [aten.native_group_norm]
        triton_red_fused_native_group_norm_41.run(buf350, buf351, buf352, buf354, 128, 2048, grid=grid(128), stream=stream0)
        buf355 = buf349; del buf349  # reuse
        # Topologically Sorted Source Nodes: [group_norm_27, out_36], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_42.run(buf350, buf351, buf352, primals_87, primals_88, buf355, 262144, grid=grid(262144), stream=stream0)
        del primals_88
        buf357 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
        buf359 = reinterpret_tensor(buf357, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf357  # reuse
        buf360 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_31, sub_31, add_40, sqrt_31, w_31], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_43.run(buf359, primals_89, buf360, 1024, 4096, grid=grid(1024), stream=stream0)
        # Topologically Sorted Source Nodes: [out_37], Original ATen: [aten.convolution]
        buf361 = extern_kernels.convolution(buf355, buf360, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf361, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
        buf362 = buf352; del buf352  # reuse
        buf363 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf365 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_28], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_37.run(buf361, buf362, buf363, buf365, 128, 512, grid=grid(128), stream=stream0)
        buf366 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_28, relu_28], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_38.run(buf361, buf362, buf363, primals_90, primals_91, buf366, 65536, grid=grid(65536), stream=stream0)
        del primals_91
        buf368 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
        buf370 = reinterpret_tensor(buf368, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf368  # reuse
        buf371 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_32, sub_32, add_41, sqrt_32, w_32], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_36.run(buf370, buf11, buf371, 1024, 9216, grid=grid(1024), stream=stream0)
        # Topologically Sorted Source Nodes: [out_38], Original ATen: [aten.convolution]
        buf372 = extern_kernels.convolution(buf366, buf371, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf372, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
        buf373 = buf363; del buf363  # reuse
        buf374 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf376 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_29], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_37.run(buf372, buf373, buf374, buf376, 128, 512, grid=grid(128), stream=stream0)
        buf377 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_29, relu_29], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_38.run(buf372, buf373, buf374, primals_93, primals_94, buf377, 65536, grid=grid(65536), stream=stream0)
        del primals_94
        buf379 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32)
        buf381 = reinterpret_tensor(buf379, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf379  # reuse
        buf382 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_33, sub_33, add_42, sqrt_33, w_33], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_per_fused_add_div_sqrt_sub_var_mean_39.run(buf381, primals_95, buf382, 4096, 1024, grid=grid(4096), stream=stream0)
        # Topologically Sorted Source Nodes: [out_39], Original ATen: [aten.convolution]
        buf383 = extern_kernels.convolution(buf377, buf382, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf383, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
        buf384 = buf383; del buf383  # reuse
        # Topologically Sorted Source Nodes: [input_13], Original ATen: [aten.add]
        triton_poi_fused_add_44.run(buf384, buf350, 262144, grid=grid(262144), stream=stream0)
        buf385 = buf374; del buf374  # reuse
        buf386 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf388 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_30], Original ATen: [aten.native_group_norm]
        triton_red_fused_native_group_norm_41.run(buf384, buf385, buf386, buf388, 128, 2048, grid=grid(128), stream=stream0)
        buf389 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_30, out_40], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_42.run(buf384, buf385, buf386, primals_96, primals_97, buf389, 262144, grid=grid(262144), stream=stream0)
        del primals_97
        buf391 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
        buf393 = reinterpret_tensor(buf391, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf391  # reuse
        buf394 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_34, sub_34, add_44, sqrt_34, w_34], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_43.run(buf393, primals_98, buf394, 1024, 4096, grid=grid(1024), stream=stream0)
        # Topologically Sorted Source Nodes: [out_41], Original ATen: [aten.convolution]
        buf395 = extern_kernels.convolution(buf389, buf394, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf395, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
        buf396 = buf386; del buf386  # reuse
        buf397 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf399 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_31], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_37.run(buf395, buf396, buf397, buf399, 128, 512, grid=grid(128), stream=stream0)
        buf400 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_31, relu_31], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_38.run(buf395, buf396, buf397, primals_99, primals_100, buf400, 65536, grid=grid(65536), stream=stream0)
        del primals_100
        buf402 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
        buf404 = reinterpret_tensor(buf402, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf402  # reuse
        buf405 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_35, sub_35, add_45, sqrt_35, w_35], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_36.run(buf404, buf12, buf405, 1024, 9216, grid=grid(1024), stream=stream0)
        # Topologically Sorted Source Nodes: [out_42], Original ATen: [aten.convolution]
        buf406 = extern_kernels.convolution(buf400, buf405, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf406, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
        buf407 = buf397; del buf397  # reuse
        buf408 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf410 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_32], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_37.run(buf406, buf407, buf408, buf410, 128, 512, grid=grid(128), stream=stream0)
        buf411 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_32, relu_32], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_38.run(buf406, buf407, buf408, primals_102, primals_103, buf411, 65536, grid=grid(65536), stream=stream0)
        del primals_103
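        # --- Editor's note: illustrative sketch, not part of the generated graph. ---
        # The triton_red/per_fused_native_group_norm_* launches reduce per-group mean
        # and variance into the (4, 32, 1, 1) buffers (batch 4, 32 groups), and the
        # *_relu_* kernels then normalize, apply the affine primals, and fuse the ReLU.
        # An eager-mode equivalent, assuming 32 groups and a typical eps of 1e-5
        # (both assumptions; the generated kernels hard-code the real values):
        def _group_norm_relu_sketch(x, weight, bias, groups=32, eps=1e-5):
            n, c, h, w = x.shape
            g = x.reshape(n, groups, c // groups * h * w)
            # population variance, as GroupNorm uses
            var, mean = torch.var_mean(g, dim=2, keepdim=True, unbiased=False)
            g = (g - mean) / torch.sqrt(var + eps)
            x = g.reshape(n, c, h, w)
            return torch.relu(x * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1))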
        buf413 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32)
        buf415 = reinterpret_tensor(buf413, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf413  # reuse
        buf416 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_36, sub_36, add_46, sqrt_36, w_36], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_per_fused_add_div_sqrt_sub_var_mean_39.run(buf415, primals_104, buf416, 4096, 1024, grid=grid(4096), stream=stream0)
        # Topologically Sorted Source Nodes: [out_43], Original ATen: [aten.convolution]
        buf417 = extern_kernels.convolution(buf411, buf416, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf417, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
        buf418 = buf417; del buf417  # reuse
        # Topologically Sorted Source Nodes: [input_14], Original ATen: [aten.add]
        triton_poi_fused_add_44.run(buf418, buf384, 262144, grid=grid(262144), stream=stream0)
        buf419 = buf408; del buf408  # reuse
        buf420 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf422 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_33], Original ATen: [aten.native_group_norm]
        triton_red_fused_native_group_norm_41.run(buf418, buf419, buf420, buf422, 128, 2048, grid=grid(128), stream=stream0)
        buf423 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_33, out_44], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_42.run(buf418, buf419, buf420, primals_105, primals_106, buf423, 262144, grid=grid(262144), stream=stream0)
        del primals_106
        buf425 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
        buf427 = reinterpret_tensor(buf425, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf425  # reuse
        buf428 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_37, sub_37, add_48, sqrt_37, w_37], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_43.run(buf427, primals_107, buf428, 1024, 4096, grid=grid(1024), stream=stream0)
        # Topologically Sorted Source Nodes: [out_45], Original ATen: [aten.convolution]
        buf429 = extern_kernels.convolution(buf423, buf428, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf429, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
        buf430 = buf420; del buf420  # reuse
        buf431 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf433 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_34], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_37.run(buf429, buf430, buf431, buf433, 128, 512, grid=grid(128), stream=stream0)
        buf434 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_34, relu_34], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_38.run(buf429, buf430, buf431, primals_108, primals_109, buf434, 65536, grid=grid(65536), stream=stream0)
        del primals_109
        buf436 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
        buf438 = reinterpret_tensor(buf436, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf436  # reuse
        buf439 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_38, sub_38, add_49, sqrt_38, w_38], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_36.run(buf438, buf13, buf439, 1024, 9216, grid=grid(1024), stream=stream0)
        # Topologically Sorted Source Nodes: [out_46], Original ATen: [aten.convolution]
        buf440 = extern_kernels.convolution(buf434, buf439, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf440, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
        buf441 = buf431; del buf431  # reuse
        buf442 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf444 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_35], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_37.run(buf440, buf441, buf442, buf444, 128, 512, grid=grid(128), stream=stream0)
        buf445 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_35, relu_35], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_38.run(buf440, buf441, buf442, primals_111, primals_112, buf445, 65536, grid=grid(65536), stream=stream0)
        del primals_112
        buf447 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32)
        buf449 = reinterpret_tensor(buf447, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf447  # reuse
        buf450 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_39, sub_39, add_50, sqrt_39, w_39], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_per_fused_add_div_sqrt_sub_var_mean_39.run(buf449, primals_113, buf450, 4096, 1024, grid=grid(4096), stream=stream0)
        # Topologically Sorted Source Nodes: [out_47], Original ATen: [aten.convolution]
        buf451 = extern_kernels.convolution(buf445, buf450, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf451, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
        buf452 = buf451; del buf451  # reuse
        # Topologically Sorted Source Nodes: [input_15], Original ATen: [aten.add]
        triton_poi_fused_add_44.run(buf452, buf418, 262144, grid=grid(262144), stream=stream0)
        buf453 = buf442; del buf442  # reuse
        buf454 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf456 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_36], Original ATen: [aten.native_group_norm]
        triton_red_fused_native_group_norm_41.run(buf452, buf453, buf454, buf456, 128, 2048, grid=grid(128), stream=stream0)
        buf457 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_36, out_48], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_42.run(buf452, buf453, buf454, primals_114, primals_115, buf457, 262144, grid=grid(262144), stream=stream0)
        del primals_115
        buf459 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192), torch.float32)
        buf461 = reinterpret_tensor(buf459, (8192, 1, 1, 1), (1, 1, 1, 1), 0); del buf459  # reuse
        buf462 = empty_strided_cuda((8192, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_40, sub_40, add_52, sqrt_40, w_40], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_45.run(buf461, primals_116, buf462, 8192, 4096, grid=grid(8192), stream=stream0)
        # Topologically Sorted Source Nodes: [residual_3], Original ATen: [aten.convolution]
        buf463 = extern_kernels.convolution(buf457, buf462, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf463, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
        buf465 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
        buf467 = reinterpret_tensor(buf465, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf465  # reuse
        buf468 = empty_strided_cuda((2048, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_41, sub_41, add_53, sqrt_41, w_41], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_46.run(buf467, primals_117, buf468, 2048, 4096, grid=grid(2048), stream=stream0)
        # Topologically Sorted Source Nodes: [out_49], Original ATen: [aten.convolution]
        buf469 = extern_kernels.convolution(buf457, buf468, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf469, (4, 2048, 4, 4), (32768, 1, 8192, 2048))
        buf470 = buf454; del buf454  # reuse
        buf471 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf473 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_37], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_47.run(buf469, buf470, buf471, buf473, 128, 1024, grid=grid(128), stream=stream0)
        buf474 = empty_strided_cuda((4, 2048, 4, 4), (32768, 1, 8192, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_37, relu_37], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_48.run(buf469, buf470, buf471, primals_118, primals_119, buf474, 131072, grid=grid(131072), stream=stream0)
        del primals_119
        buf476 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
        buf478 = reinterpret_tensor(buf476, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf476  # reuse
        buf479 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_42, sub_42, add_54, sqrt_42, w_42], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_49.run(buf478, buf14, buf479, 2048, 18432, grid=grid(2048), stream=stream0)
        # Topologically Sorted Source Nodes: [out_50], Original ATen: [aten.convolution]
        buf480 = extern_kernels.convolution(buf474, buf479, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf480, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
        buf481 = buf471; del buf471  # reuse
        buf482 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf484 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_38], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_50.run(buf480, buf481, buf482, buf484, 128, 256, grid=grid(128), stream=stream0)
        buf485 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_38, relu_38], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_51.run(buf480, buf481, buf482, primals_121, primals_122, buf485, 32768, grid=grid(32768), stream=stream0)
        del primals_122
        buf487 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192), torch.float32)
        buf489 = reinterpret_tensor(buf487, (8192, 1, 1, 1), (1, 1, 1, 1), 0); del buf487  # reuse
        buf490 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_43, sub_43, add_55, sqrt_43, w_43], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_52.run(buf489, primals_123, buf490, 8192, 2048, grid=grid(8192), stream=stream0)
        # Topologically Sorted Source Nodes: [out_51], Original ATen: [aten.convolution]
        buf491 = extern_kernels.convolution(buf485, buf490, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf491, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
        buf492 = buf463; del buf463  # reuse
        # Topologically Sorted Source Nodes: [input_16], Original ATen: [aten.add]
        triton_poi_fused_add_53.run(buf492, buf491, 131072, grid=grid(131072), stream=stream0)
        buf493 = buf482; del buf482  # reuse
        buf494 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf496 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_39], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_54.run(buf492, buf493, buf494, buf496, 128, 1024, grid=grid(128), stream=stream0)
        buf497 = buf491; del buf491  # reuse
        # Topologically Sorted Source Nodes: [group_norm_39, out_52], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_55.run(buf492, buf493, buf494, primals_124, primals_125, buf497, 131072, grid=grid(131072), stream=stream0)
        del primals_125
        buf499 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
        buf501 = reinterpret_tensor(buf499, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf499  # reuse
        buf502 = empty_strided_cuda((2048, 8192, 1, 1), (8192, 1, 8192, 8192), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_44, sub_44, add_57, sqrt_44, w_44], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_56.run(buf501, primals_126, buf502, 2048, 8192, grid=grid(2048), stream=stream0)
        # Topologically Sorted Source Nodes: [out_53], Original ATen: [aten.convolution]
        buf503 = extern_kernels.convolution(buf497, buf502, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf503, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
        buf504 = buf494; del buf494  # reuse
        buf505 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf507 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_40], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_50.run(buf503, buf504, buf505, buf507, 128, 256, grid=grid(128), stream=stream0)
        buf508 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_40, relu_40], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_51.run(buf503, buf504, buf505, primals_127, primals_128, buf508, 32768, grid=grid(32768), stream=stream0)
        del primals_128
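        # --- Editor's note: illustrative sketch, not part of the generated graph. ---
        # The recurring `bufA = bufB; del bufB  # reuse` and reinterpret_tensor(...)
        # lines are Inductor's memory planning: a buffer that just died donates its
        # storage to the next allocation instead of a fresh device allocation, and
        # reinterpret_tensor builds a view with new sizes/strides over the same
        # storage, much like torch.as_strided. Shapes below are illustrative only:
        def _reuse_sketch():
            scratch = torch.empty(2048, device='cuda')  # original buffer
            # same 2048 floats viewed as a (2048, 1, 1, 1) column, offset 0
            view = torch.as_strided(scratch, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
            return view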
        buf510 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
        buf512 = reinterpret_tensor(buf510, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf510  # reuse
        buf513 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_45, sub_45, add_58, sqrt_45, w_45], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_49.run(buf512, buf15, buf513, 2048, 18432, grid=grid(2048), stream=stream0)
        # Topologically Sorted Source Nodes: [out_54], Original ATen: [aten.convolution]
        buf514 = extern_kernels.convolution(buf508, buf513, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf514, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
        buf515 = buf505; del buf505  # reuse
        buf516 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf518 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_41], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_50.run(buf514, buf515, buf516, buf518, 128, 256, grid=grid(128), stream=stream0)
        buf519 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_41, relu_41], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_51.run(buf514, buf515, buf516, primals_130, primals_131, buf519, 32768, grid=grid(32768), stream=stream0)
        del primals_131
        buf521 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192), torch.float32)
        buf523 = reinterpret_tensor(buf521, (8192, 1, 1, 1), (1, 1, 1, 1), 0); del buf521  # reuse
        buf524 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_46, sub_46, add_59, sqrt_46, w_46], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_52.run(buf523, primals_132, buf524, 8192, 2048, grid=grid(8192), stream=stream0)
        # Topologically Sorted Source Nodes: [out_55], Original ATen: [aten.convolution]
        buf525 = extern_kernels.convolution(buf519, buf524, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf525, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
        buf526 = buf525; del buf525  # reuse
        # Topologically Sorted Source Nodes: [input_17], Original ATen: [aten.add]
        triton_poi_fused_add_57.run(buf526, buf492, 131072, grid=grid(131072), stream=stream0)
        buf527 = buf516; del buf516  # reuse
        buf528 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf530 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_42], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_54.run(buf526, buf527, buf528, buf530, 128, 1024, grid=grid(128), stream=stream0)
        buf531 = empty_strided_cuda((4, 8192, 2, 2), (32768, 1, 16384, 8192), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_42, out_56], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_55.run(buf526, buf527, buf528, primals_133, primals_134, buf531, 131072, grid=grid(131072), stream=stream0)
        del primals_134
        buf533 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
        buf535 = reinterpret_tensor(buf533, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf533  # reuse
        buf536 = empty_strided_cuda((2048, 8192, 1, 1), (8192, 1, 8192, 8192), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_47, sub_47, add_61, sqrt_47, w_47], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_56.run(buf535, primals_135, buf536, 2048, 8192, grid=grid(2048), stream=stream0)
        # Topologically Sorted Source Nodes: [out_57], Original ATen: [aten.convolution]
        buf537 = extern_kernels.convolution(buf531, buf536, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf537, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
        buf538 = buf528; del buf528  # reuse
        buf539 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf541 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_43], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_50.run(buf537, buf538, buf539, buf541, 128, 256, grid=grid(128), stream=stream0)
        buf542 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_43, relu_43], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_51.run(buf537, buf538, buf539, primals_136, primals_137, buf542, 32768, grid=grid(32768), stream=stream0)
        del primals_137
        buf544 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
        buf546 = reinterpret_tensor(buf544, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf544  # reuse
        buf547 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_48, sub_48, add_62, sqrt_48, w_48], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_49.run(buf546, buf16, buf547, 2048, 18432, grid=grid(2048), stream=stream0)
        # Topologically Sorted Source Nodes: [out_58], Original ATen: [aten.convolution]
        buf548 = extern_kernels.convolution(buf542, buf547, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf548, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
        buf549 = buf539; del buf539  # reuse
        buf550 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf552 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_44], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_50.run(buf548, buf549, buf550, buf552, 128, 256, grid=grid(128), stream=stream0)
        buf553 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_44, relu_44], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_51.run(buf548, buf549, buf550, primals_139, primals_140, buf553, 32768, grid=grid(32768), stream=stream0)
        del primals_140
        buf555 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192), torch.float32)
        buf557 = reinterpret_tensor(buf555, (8192, 1, 1, 1), (1, 1, 1, 1), 0); del buf555  # reuse
        buf558 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_49, sub_49, add_63, sqrt_49, w_49], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_52.run(buf557, primals_141, buf558, 8192, 2048, grid=grid(8192), stream=stream0)
        # Topologically Sorted Source Nodes: [out_59], Original ATen: [aten.convolution]
        buf559 = extern_kernels.convolution(buf553, buf558, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf559, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
        buf560 = buf559; del buf559  # reuse
        # Topologically Sorted Source Nodes: [input_18], Original ATen: [aten.add]
        triton_poi_fused_add_57.run(buf560, buf526, 131072, grid=grid(131072), stream=stream0)
        buf561 = buf550; del buf550  # reuse
        buf562 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf564 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_45], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_54.run(buf560, buf561, buf562, buf564, 128, 1024, grid=grid(128), stream=stream0)
        buf565 = empty_strided_cuda((4, 8192, 2, 2), (32768, 1, 16384, 8192), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_45, out_60], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_55.run(buf560, buf561, buf562, primals_142, primals_143, buf565, 131072, grid=grid(131072), stream=stream0)
        del primals_143
        buf567 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
        buf569 = reinterpret_tensor(buf567, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf567  # reuse
        buf570 = empty_strided_cuda((2048, 8192, 1, 1), (8192, 1, 8192, 8192), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_50, sub_50, add_65, sqrt_50, w_50], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_56.run(buf569, primals_144, buf570, 2048, 8192, grid=grid(2048), stream=stream0)
        # Topologically Sorted Source Nodes: [out_61], Original ATen: [aten.convolution]
        buf571 = extern_kernels.convolution(buf565, buf570, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf571, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
        buf572 = buf562; del buf562  # reuse
        buf573 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf575 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_46], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_50.run(buf571, buf572, buf573, buf575, 128, 256, grid=grid(128), stream=stream0)
        buf576 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_46, relu_46], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_51.run(buf571, buf572, buf573, primals_145, primals_146, buf576, 32768, grid=grid(32768), stream=stream0)
        del primals_146
        buf578 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
        buf580 = reinterpret_tensor(buf578, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf578  # reuse
        buf581 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_51, sub_51, add_66, sqrt_51, w_51], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_49.run(buf580, buf17, buf581, 2048, 18432, grid=grid(2048), stream=stream0)
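        # --- Editor's note: illustrative sketch, not part of the generated graph. ---
        # triton_poi_fused_add_57 above is the residual sum for input_18, written in
        # place: buf560 reuses the storage of the block's final 1x1 conv output and
        # the skip tensor is added into it. A minimal Triton kernel of the same shape
        # looks like this (name, signature, and launch details are illustrative):
        @triton.jit
        def _add_inplace_sketch(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
            xindex = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)
            xmask = xindex < xnumel
            acc = tl.load(in_out_ptr0 + xindex, xmask)    # current value (conv output)
            skip = tl.load(in_ptr0 + xindex, xmask)       # residual branch
            tl.store(in_out_ptr0 + xindex, acc + skip, xmask)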
        # Topologically Sorted Source Nodes: [out_62], Original ATen: [aten.convolution]
        buf582 = extern_kernels.convolution(buf576, buf581, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf582, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
        buf583 = buf573; del buf573  # reuse
        buf584 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf586 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_47], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_50.run(buf582, buf583, buf584, buf586, 128, 256, grid=grid(128), stream=stream0)
        buf587 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [group_norm_47, relu_47], Original ATen: [aten.native_group_norm, aten.relu]
        triton_poi_fused_native_group_norm_relu_51.run(buf582, buf583, buf584, primals_148, primals_149, buf587, 32768, grid=grid(32768), stream=stream0)
        del primals_149
        buf589 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192), torch.float32)
        buf591 = reinterpret_tensor(buf589, (8192, 1, 1, 1), (1, 1, 1, 1), 0); del buf589  # reuse
        buf592 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
        # Topologically Sorted Source Nodes: [var_mean_52, sub_52, add_67, sqrt_52, w_52], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
        triton_red_fused_add_div_sqrt_sub_var_mean_52.run(buf591, primals_150, buf592, 8192, 2048, grid=grid(8192), stream=stream0)
        # Topologically Sorted Source Nodes: [out_63], Original ATen: [aten.convolution]
        buf593 = extern_kernels.convolution(buf587, buf592, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf593, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
        buf594 = buf593; del buf593  # reuse
        # Topologically Sorted Source Nodes: [input_19], Original ATen: [aten.add]
        triton_poi_fused_add_57.run(buf594, buf560, 131072, grid=grid(131072), stream=stream0)
        buf595 = reinterpret_tensor(buf584, (4, 32, 1, 1), (32, 1, 32, 32), 0); del buf584  # reuse
        buf596 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
        buf598 = reinterpret_tensor(buf596, (4, 32, 1, 1), (32, 1, 32, 32), 0); del buf596  # reuse
        # Topologically Sorted Source Nodes: [input_20], Original ATen: [aten.native_group_norm]
        triton_per_fused_native_group_norm_58.run(buf598, buf594, buf595, 128, 1024, grid=grid(128), stream=stream0)
        buf599 = empty_strided_cuda((4, 8192, 1, 1), (8192, 1, 8192, 8192), torch.float32)
        # Topologically Sorted Source Nodes: [input_20, input_21, input_22], Original ATen: [aten.native_group_norm, aten.relu, aten.mean]
        triton_poi_fused_mean_native_group_norm_relu_59.run(buf594, buf595, buf598, primals_151, primals_152, buf599, 32768, grid=grid(32768), stream=stream0)
        # Topologically Sorted Source Nodes: [input_23], Original ATen: [aten.convolution]
        buf600 = extern_kernels.convolution(buf599, primals_153, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf600, (4, 21843, 1, 1), (21843, 1, 21843, 21843))
        buf601 = reinterpret_tensor(buf600, (4, 21843, 1, 1), (21843, 1, 87372, 87372), 0); del buf600  # reuse
        # Topologically Sorted Source Nodes: [input_23], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_60.run(buf601, primals_154, 87372, grid=grid(87372), stream=stream0)
        del primals_154
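        # --- Editor's note: illustrative sketch, not part of the generated graph. ---
        # The tail of the graph (input_20 ... input_23) is the classification head:
        # a last GroupNorm + ReLU, a global average pool over the 2x2 spatial grid
        # (aten.mean), and a biased 1x1 convolution producing 21843 logits per image.
        # 21843 matches the ImageNet-21k label count, suggesting a Big-Transfer-style
        # backbone, though that is an inference from the shapes alone:
        def _head_sketch(x, gn, head_weight, head_bias):
            x = torch.relu(gn(x))                        # (N, 8192, 2, 2)
            x = x.mean(dim=(2, 3), keepdim=True)         # global average pool
            x = torch.conv2d(x, head_weight, head_bias)  # 1x1 conv -> (N, 21843, 1, 1)
            return x.flatten(1)                          # (N, 21843) logits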
    return (reinterpret_tensor(buf601, (4, 21843), (21843, 1), 0), buf0, buf1, primals_3, primals_5, primals_6, primals_7, buf2, primals_10, primals_12, primals_13, primals_15, primals_16, buf3, primals_19, primals_21, primals_22, primals_24, primals_25, buf4, primals_28, primals_30, primals_31, primals_33, primals_34, buf5, primals_37, primals_39, primals_40, primals_42, primals_43, primals_44, buf6, primals_47, primals_49, primals_50, primals_52, primals_53, buf7, primals_56, primals_58, primals_59, primals_61, primals_62, buf8, primals_65, primals_67, primals_68, primals_70, primals_71, buf9, primals_74, primals_76, primals_77, primals_79, primals_80, primals_81, buf10, primals_84, primals_86, primals_87, primals_89, primals_90, buf11, primals_93, primals_95, primals_96, primals_98, primals_99, buf12, primals_102, primals_104, primals_105, primals_107, primals_108, buf13, primals_111, primals_113, primals_114, primals_116, primals_117, primals_118, buf14, primals_121, primals_123, primals_124, primals_126, primals_127, buf15, primals_130, primals_132, primals_133, primals_135, primals_136, buf16, primals_139, primals_141, primals_142, primals_144, primals_145, buf17, primals_148, primals_150, primals_151, primals_152, primals_153, buf21, buf22, buf24, buf25, buf26, reinterpret_tensor(buf27, (4, 32), (32, 1), 0), reinterpret_tensor(buf30, (4, 32), (32, 1), 0), buf31, buf35, buf36, buf41, buf42, buf43, reinterpret_tensor(buf44, (4, 32), (32, 1), 0), reinterpret_tensor(buf47, (4, 32), (32, 1), 0), buf48, buf52, buf53, buf54, reinterpret_tensor(buf55, (4, 32), (32, 1), 0), reinterpret_tensor(buf58, (4, 32), (32, 1), 0), buf59, buf63, buf64, buf66, reinterpret_tensor(buf67, (4, 32), (32, 1), 0), reinterpret_tensor(buf70, (4, 32), (32, 1), 0), buf71, buf75, buf76, buf77, reinterpret_tensor(buf78, (4, 32), (32, 1), 0), reinterpret_tensor(buf81, (4, 32), (32, 1), 0), buf82, buf86, buf87, buf88, reinterpret_tensor(buf89, (4, 32), (32, 1), 0), reinterpret_tensor(buf92, (4, 32), (32, 1), 0), buf93, buf97, buf98, buf100, reinterpret_tensor(buf101, (4, 32), (32, 1), 0), reinterpret_tensor(buf104, (4, 32), (32, 1), 0), buf105, buf109, buf110, buf111, reinterpret_tensor(buf112, (4, 32), (32, 1), 0), reinterpret_tensor(buf115, (4, 32), (32, 1), 0), buf116, buf120, buf121, buf122, reinterpret_tensor(buf123, (4, 32), (32, 1), 0), reinterpret_tensor(buf126, (4, 32), (32, 1), 0), buf127, buf131, buf132, buf134, reinterpret_tensor(buf135, (4, 32), (32, 1), 0), reinterpret_tensor(buf138, (4, 32), (32, 1), 0), buf139, buf143, buf144, buf145, reinterpret_tensor(buf146, (4, 32), (32, 1), 0), reinterpret_tensor(buf149, (4, 32), (32, 1), 0), buf150, buf154, buf155, buf156, reinterpret_tensor(buf157, (4, 32), (32, 1), 0), reinterpret_tensor(buf160, (4, 32), (32, 1), 0), buf161, buf165, buf166, buf168, reinterpret_tensor(buf169, (4, 32), (32, 1), 0), reinterpret_tensor(buf172, (4, 32), (32, 1), 0), buf173, buf177, buf178, buf183, buf184, buf185, reinterpret_tensor(buf186, (4, 32), (32, 1), 0), reinterpret_tensor(buf189, (4, 32), (32, 1), 0), buf190, buf194, buf195, buf196, reinterpret_tensor(buf197, (4, 32), (32, 1), 0), reinterpret_tensor(buf200, (4, 32), (32, 1), 0), buf201, buf205, buf206, buf208, reinterpret_tensor(buf209, (4, 32), (32, 1), 0), reinterpret_tensor(buf212, (4, 32), (32, 1), 0), buf213, buf217, buf218, buf219, reinterpret_tensor(buf220, (4, 32), (32, 1), 0), reinterpret_tensor(buf223, (4, 32), (32, 1), 0), buf224, buf228, buf229, buf230, reinterpret_tensor(buf231, (4, 32), (32, 1), 0), reinterpret_tensor(buf234, (4, 32), (32, 1), 0), buf235, buf239, buf240, buf242, reinterpret_tensor(buf243, (4, 32), (32, 1), 0), reinterpret_tensor(buf246, (4, 32), (32, 1), 0), buf247, buf251, buf252, buf253, reinterpret_tensor(buf254, (4, 32), (32, 1), 0), reinterpret_tensor(buf257, (4, 32), (32, 1), 0), buf258, buf262, buf263, buf264, reinterpret_tensor(buf265, (4, 32), (32, 1), 0), reinterpret_tensor(buf268, (4, 32), (32, 1), 0), buf269, buf273, buf274, buf276, reinterpret_tensor(buf277, (4, 32), (32, 1), 0), reinterpret_tensor(buf280, (4, 32), (32, 1), 0), buf281, buf285, buf286, buf287, reinterpret_tensor(buf288, (4, 32), (32, 1), 0), reinterpret_tensor(buf291, (4, 32), (32, 1), 0), buf292, buf296, buf297, buf298, reinterpret_tensor(buf299, (4, 32), (32, 1), 0), reinterpret_tensor(buf302, (4, 32), (32, 1), 0), buf303, buf307, buf308, buf310, reinterpret_tensor(buf311, (4, 32), (32, 1), 0), reinterpret_tensor(buf314, (4, 32), (32, 1), 0), buf315, buf319, buf320, buf325, buf326, buf327, reinterpret_tensor(buf328, (4, 32), (32, 1), 0), reinterpret_tensor(buf331, (4, 32), (32, 1), 0), buf332, buf336, buf337, buf338, reinterpret_tensor(buf339, (4, 32), (32, 1), 0), reinterpret_tensor(buf342, (4, 32), (32, 1), 0), buf343, buf347, buf348, buf350, reinterpret_tensor(buf351, (4, 32), (32, 1), 0), reinterpret_tensor(buf354, (4, 32), (32, 1), 0), buf355, buf359, buf360, buf361, reinterpret_tensor(buf362, (4, 32), (32, 1), 0), reinterpret_tensor(buf365, (4, 32), (32, 1), 0), buf366, buf370, buf371, buf372, reinterpret_tensor(buf373, (4, 32), (32, 1), 0), reinterpret_tensor(buf376, (4, 32), (32, 1), 0), buf377, buf381, buf382, buf384, reinterpret_tensor(buf385, (4, 32), (32, 1), 0), reinterpret_tensor(buf388, (4, 32), (32, 1), 0), buf389, buf393, buf394, buf395, reinterpret_tensor(buf396, (4, 32), (32, 1), 0), reinterpret_tensor(buf399, (4, 32), (32, 1), 0), buf400, buf404, buf405, buf406, reinterpret_tensor(buf407, (4, 32), (32, 1), 0), reinterpret_tensor(buf410, (4, 32), (32, 1), 0), buf411, buf415, buf416, buf418, reinterpret_tensor(buf419, (4, 32), (32, 1), 0), reinterpret_tensor(buf422, (4, 32), (32, 1), 0), buf423, buf427, buf428, buf429, reinterpret_tensor(buf430, (4, 32), (32, 1), 0), reinterpret_tensor(buf433, (4, 32), (32, 1), 0), buf434, buf438, buf439, buf440, reinterpret_tensor(buf441, (4, 32), (32, 1), 0), reinterpret_tensor(buf444, (4, 32), (32, 1), 0), buf445, buf449, buf450, buf452, reinterpret_tensor(buf453, (4, 32), (32, 1), 0), reinterpret_tensor(buf456, (4, 32), (32, 1), 0), buf457, buf461, buf462, buf467, buf468, buf469, reinterpret_tensor(buf470, (4, 32), (32, 1), 0), reinterpret_tensor(buf473, (4, 32), (32, 1), 0), buf474, buf478, buf479, buf480, reinterpret_tensor(buf481, (4, 32), (32, 1), 0), reinterpret_tensor(buf484, (4, 32), (32, 1), 0), buf485, buf489, buf490, buf492, reinterpret_tensor(buf493, (4, 32), (32, 1), 0), reinterpret_tensor(buf496, (4, 32), (32, 1), 0), buf497, buf501, buf502, buf503, reinterpret_tensor(buf504, (4, 32), (32, 1), 0), reinterpret_tensor(buf507, (4, 32), (32, 1), 0), buf508, buf512, buf513, buf514, reinterpret_tensor(buf515, (4, 32), (32, 1), 0), reinterpret_tensor(buf518, (4, 32), (32, 1), 0), buf519, buf523, buf524, buf526, reinterpret_tensor(buf527, (4, 32), (32, 1), 0), reinterpret_tensor(buf530, (4, 32), (32, 1), 0), buf531, buf535, buf536, buf537, reinterpret_tensor(buf538, (4, 32), (32, 1), 0), reinterpret_tensor(buf541, (4, 32), (32, 1), 0), buf542, buf546, buf547, buf548, reinterpret_tensor(buf549, (4, 32), (32, 1), 0), reinterpret_tensor(buf552, (4, 32), (32, 1), 0), buf553, buf557, buf558, buf560, reinterpret_tensor(buf561, (4, 32), (32, 1), 0), reinterpret_tensor(buf564, (4, 32), (32, 1), 0), buf565, buf569, buf570, buf571, reinterpret_tensor(buf572, (4, 32), (32, 1), 0), reinterpret_tensor(buf575, (4, 32), (32, 1), 0), buf576, buf580, buf581, buf582, reinterpret_tensor(buf583, (4, 32), (32, 1), 0), reinterpret_tensor(buf586, (4, 32), (32, 1), 0), buf587, buf591, buf592, buf594, buf595, buf598, buf599, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((256, 3, 7, 7), (147, 49, 7, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((256, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_9 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_10 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_12 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_13 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_14 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_15 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_16 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_17 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_18 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_19 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_20 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_21 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_22 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_23 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_24 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_25 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_26 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_27 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_28 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_29 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_30 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_31 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_32 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_33 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_34 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_35 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_36 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1),
device='cuda:0', dtype=torch.float32) primals_37 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_38 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_39 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_40 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_41 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_42 = rand_strided((2048, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_43 = rand_strided((512, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_44 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_45 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_46 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_47 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_48 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_49 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_50 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_51 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_52 = rand_strided((512, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_53 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_54 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_55 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_56 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_57 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_58 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_59 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_60 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_61 = rand_strided((512, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_62 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_63 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_64 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_65 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_66 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_67 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_68 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_69 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_70 = rand_strided((512, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_71 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_72 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_73 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_74 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_75 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_76 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_77 = rand_strided((2048, ), 
(1, ), device='cuda:0', dtype=torch.float32) primals_78 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_79 = rand_strided((4096, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_80 = rand_strided((1024, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_81 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_82 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_83 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_84 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_85 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_86 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_87 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_88 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_89 = rand_strided((1024, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_90 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_91 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_92 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_93 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_94 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_95 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_96 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_97 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_98 = rand_strided((1024, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_99 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_100 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_101 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_102 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_103 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_104 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_105 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_106 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_107 = rand_strided((1024, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_108 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_109 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_110 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_111 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_112 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_113 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_114 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_115 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_116 = rand_strided((8192, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_117 = rand_strided((2048, 4096, 1, 1), (4096, 
1, 1, 1), device='cuda:0', dtype=torch.float32) primals_118 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_119 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_120 = rand_strided((2048, 2048, 3, 3), (18432, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_121 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_122 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_123 = rand_strided((8192, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_124 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32) primals_125 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32) primals_126 = rand_strided((2048, 8192, 1, 1), (8192, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_127 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_128 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_129 = rand_strided((2048, 2048, 3, 3), (18432, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_130 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_131 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_132 = rand_strided((8192, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_133 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32) primals_134 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32) primals_135 = rand_strided((2048, 8192, 1, 1), (8192, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_136 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_137 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_138 = rand_strided((2048, 2048, 3, 3), (18432, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_139 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_140 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_141 = rand_strided((8192, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_142 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32) primals_143 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32) primals_144 = rand_strided((2048, 8192, 1, 1), (8192, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_145 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_146 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_147 = rand_strided((2048, 2048, 3, 3), (18432, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_148 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_149 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_150 = rand_strided((8192, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_151 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32) primals_152 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32) primals_153 = rand_strided((21843, 8192, 1, 1), (8192, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_154 = rand_strided((21843, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, 
primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
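# Illustrative sketch (ours, not emitted by the compiler): the fused
# triton_*_add_div_sqrt_sub_var_mean_* kernels above standardize each
# convolution filter before extern_kernels.convolution runs, which is the
# same math as StdConv2d.forward in the reference module below. A minimal
# eager-mode equivalent, assuming biased variance and the kernels' eps of
# 1e-10:
import torch

def _standardize_weight_ref(w, eps=1e-10):
    # Per output filter: subtract the mean and divide by
    # sqrt(biased variance + eps), computed over (in_channels, kh, kw).
    v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
    return (w - m) / torch.sqrt(v + eps)
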
import torch import torch.nn.functional as F import torch.nn as nn from collections import OrderedDict def conv3x3(cin, cout, stride=1, groups=1, bias=False): return StdConv2d(cin, cout, kernel_size=3, stride=stride, padding=1, bias=bias, groups=groups) def conv1x1(cin, cout, stride=1, bias=False): return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0, bias=bias) def tf2th(conv_weights): """Possibly convert HWIO to OIHW.""" if conv_weights.ndim == 4: conv_weights = conv_weights.transpose([3, 2, 0, 1]) return torch.from_numpy(conv_weights) class StdConv2d(nn.Conv2d): def forward(self, x): w = self.weight v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False) w = (w - m) / torch.sqrt(v + 1e-10) return F.conv2d(x, w, self.bias, self.stride, self.padding, self. dilation, self.groups) class PreActBottleneck(nn.Module): """Pre-activation (v2) bottleneck block. Follows the implementation of "Identity Mappings in Deep Residual Networks": https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua Except it puts the stride on 3x3 conv when available. """ def __init__(self, cin, cout=None, cmid=None, stride=1): super().__init__() cout = cout or cin cmid = cmid or cout // 4 self.gn1 = nn.GroupNorm(32, cin) self.conv1 = conv1x1(cin, cmid) self.gn2 = nn.GroupNorm(32, cmid) self.conv2 = conv3x3(cmid, cmid, stride) self.gn3 = nn.GroupNorm(32, cmid) self.conv3 = conv1x1(cmid, cout) self.relu = nn.ReLU(inplace=True) if stride != 1 or cin != cout: self.downsample = conv1x1(cin, cout, stride) def forward(self, x): out = self.relu(self.gn1(x)) residual = x if hasattr(self, 'downsample'): residual = self.downsample(out) out = self.conv1(out) out = self.conv2(self.relu(self.gn2(out))) out = self.conv3(self.relu(self.gn3(out))) return out + residual def load_from(self, weights, prefix=''): convname = 'standardized_conv2d' with torch.no_grad(): self.conv1.weight.copy_(tf2th(weights[ f'{prefix}a/{convname}/kernel'])) self.conv2.weight.copy_(tf2th(weights[ f'{prefix}b/{convname}/kernel'])) self.conv3.weight.copy_(tf2th(weights[ f'{prefix}c/{convname}/kernel'])) self.gn1.weight.copy_(tf2th(weights[f'{prefix}a/group_norm/gamma']) ) self.gn2.weight.copy_(tf2th(weights[f'{prefix}b/group_norm/gamma']) ) self.gn3.weight.copy_(tf2th(weights[f'{prefix}c/group_norm/gamma']) ) self.gn1.bias.copy_(tf2th(weights[f'{prefix}a/group_norm/beta'])) self.gn2.bias.copy_(tf2th(weights[f'{prefix}b/group_norm/beta'])) self.gn3.bias.copy_(tf2th(weights[f'{prefix}c/group_norm/beta'])) if hasattr(self, 'downsample'): w = weights[f'{prefix}a/proj/{convname}/kernel'] self.downsample.weight.copy_(tf2th(w)) class ResNetV2(nn.Module): """Implementation of Pre-activation (v2) ResNet mode.""" def __init__(self, block_units, width_factor, head_size=21843, zero_head=False): super().__init__() wf = width_factor self.root = nn.Sequential(OrderedDict([('conv', StdConv2d(3, 64 * wf, kernel_size=7, stride=2, padding=3, bias=False)), ('pad', nn.ConstantPad2d(1, 0)), ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0))])) self.body = nn.Sequential(OrderedDict([('block1', nn.Sequential( OrderedDict([('unit01', PreActBottleneck(cin=64 * wf, cout=256 * wf, cmid=64 * wf))] + [(f'unit{i:02d}', PreActBottleneck(cin= 256 * wf, cout=256 * wf, cmid=64 * wf)) for i in range(2, block_units[0] + 1)]))), ('block2', nn.Sequential(OrderedDict([ ('unit01', PreActBottleneck(cin=256 * wf, cout=512 * wf, cmid= 128 * wf, stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin= 512 * wf, cout=512 * wf, cmid=128 * wf)) for i in 
range(2, block_units[1] + 1)]))), ('block3', nn.Sequential(OrderedDict([ ('unit01', PreActBottleneck(cin=512 * wf, cout=1024 * wf, cmid= 256 * wf, stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin= 1024 * wf, cout=1024 * wf, cmid=256 * wf)) for i in range(2, block_units[2] + 1)]))), ('block4', nn.Sequential(OrderedDict([ ('unit01', PreActBottleneck(cin=1024 * wf, cout=2048 * wf, cmid =512 * wf, stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin =2048 * wf, cout=2048 * wf, cmid=512 * wf)) for i in range(2, block_units[3] + 1)])))])) self.zero_head = zero_head self.head = nn.Sequential(OrderedDict([('gn', nn.GroupNorm(32, 2048 * wf)), ('relu', nn.ReLU(inplace=True)), ('avg', nn. AdaptiveAvgPool2d(output_size=1)), ('conv', nn.Conv2d(2048 * wf, head_size, kernel_size=1, bias=True))])) def forward(self, x): x = self.head(self.body(self.root(x))) assert x.shape[-2:] == (1, 1) return x[..., 0, 0] def load_from(self, weights, prefix='resnet/'): with torch.no_grad(): self.root.conv.weight.copy_(tf2th(weights[ f'{prefix}root_block/standardized_conv2d/kernel'])) self.head.gn.weight.copy_(tf2th(weights[ f'{prefix}group_norm/gamma'])) self.head.gn.bias.copy_(tf2th(weights[f'{prefix}group_norm/beta'])) if self.zero_head: nn.init.zeros_(self.head.conv.weight) nn.init.zeros_(self.head.conv.bias) else: self.head.conv.weight.copy_(tf2th(weights[ f'{prefix}head/conv2d/kernel'])) self.head.conv.bias.copy_(tf2th(weights[ f'{prefix}head/conv2d/bias'])) for bname, block in self.body.named_children(): for uname, unit in block.named_children(): unit.load_from(weights, prefix=f'{prefix}{bname}/{uname}/') def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {'block_units': [4, 4, 4, 4], 'width_factor': 4}]
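# Illustrative sketch (our naming, not part of the generated code): the
# fused triton_*_native_group_norm_relu_* kernels below compute GroupNorm
# (32 groups, eps=1e-05) followed by ReLU in a single pass. An eager-mode
# reference that can be used to spot-check a kernel's output on NCHW input:
import torch
import torch.nn.functional as F

def _group_norm_relu_ref(x, gamma, beta, num_groups=32, eps=1e-05):
    # Normalize per (sample, group) with rsqrt(var + eps), apply the affine
    # parameters, then clamp negatives to zero -- the same sequence the
    # fused kernels compute elementwise.
    return F.relu(F.group_norm(x, num_groups, gamma, beta, eps))
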
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn.functional as F import torch.nn as nn from collections import OrderedDict assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 768 xnumel = 49 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 147 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 1024 y1 = yindex // 1024 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 1024 * x2 + 9216 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 2048 y1 = yindex // 2048 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 2048 * x2 + 18432 * y1), tmp0, xmask) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_6(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 256 rnumel = 147 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 147 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(rmask & xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask & xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 147, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(rmask & xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 147.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-10 tmp20 = tmp18 + tmp19 tmp21 = libdevice.sqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 / tmp21 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr1 + (r1 + 147 * x0), tmp23, rmask & xmask) @triton.jit def triton_poi_fused_constant_pad_nd_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 8704 % 34 x1 = xindex // 256 % 34 x3 = xindex // 295936 x4 = xindex % 8704 x6 = xindex tmp0 = -1 + x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x1 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-8448 + x4 + 8192 * x2 + 262144 * x3), tmp10, other=0.0) tl.store(out_ptr0 + x6, tmp11, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x1 = xindex // 256 % 16 x2 = xindex // 4096 % 16 x3 = xindex // 65536 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 17408 * x2 + 295936 * x3), None) tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 17408 * x2 + 295936 * x3), None) tmp3 = tl.load(in_ptr0 + (512 + x0 + 512 * x1 + 17408 * x2 + 295936 * x3), None) tmp5 = 
tl.load(in_ptr0 + (8704 + x0 + 512 * x1 + 17408 * x2 + 295936 * x3), None) tmp7 = tl.load(in_ptr0 + (8960 + x0 + 512 * x1 + 17408 * x2 + 295936 * x3), None) tmp9 = tl.load(in_ptr0 + (9216 + x0 + 512 * x1 + 17408 * x2 + 295936 * x3), None) tmp11 = tl.load(in_ptr0 + (17408 + x0 + 512 * x1 + 17408 * x2 + 295936 * x3), None) tmp13 = tl.load(in_ptr0 + (17664 + x0 + 512 * x1 + 17408 * x2 + 295936 * x3), None) tmp15 = tl.load(in_ptr0 + (17920 + x0 + 512 * x1 + 17408 * x2 + 295936 * x3), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp17 = tmp1 > tmp0 tmp18 = tl.full([1], 1, tl.int8) tmp19 = tl.full([1], 0, tl.int8) tmp20 = tl.where(tmp17, tmp18, tmp19) tmp21 = tmp3 > tmp2 tmp22 = tl.full([1], 2, tl.int8) tmp23 = tl.where(tmp21, tmp22, tmp20) tmp24 = tmp5 > tmp4 tmp25 = tl.full([1], 3, tl.int8) tmp26 = tl.where(tmp24, tmp25, tmp23) tmp27 = tmp7 > tmp6 tmp28 = tl.full([1], 4, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp9 > tmp8 tmp31 = tl.full([1], 5, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tmp33 = tmp11 > tmp10 tmp34 = tl.full([1], 6, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp13 > tmp12 tmp37 = tl.full([1], 7, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tmp39 = tmp15 > tmp14 tmp40 = tl.full([1], 8, tl.int8) tmp41 = tl.where(tmp39, tmp40, tmp38) tl.store(out_ptr0 + x4, tmp16, None) tl.store(out_ptr1 + x4, tmp41, None) @triton.jit def triton_red_fused_native_group_norm_9(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 8 r3 = rindex // 8 tmp0 = tl.load(in_ptr0 + (r2 + 8 * x0 + 256 * r3 + 65536 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 256 x2 = xindex // 65536 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 8), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 8), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 2048.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_11(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-10 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 256 * x0), tmp20, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_12(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-10 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) 
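    # Note (added for readability): tmp18 is the per-filter
    # sqrt(var + 1e-10), stored in place and presumably saved for the
    # backward pass; the store below writes the standardized weights
    # (w - mean) / sqrt(var + eps) consumed by the following convolution.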
tl.store(out_ptr1 + (r1 + 256 * x0), tmp20, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_13(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 256 rnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 2304 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 2304.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 2304 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 2304 * x0), tmp12, rmask & xmask) @triton.jit def triton_poi_fused_add_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = tl.load(in_out_ptr0 + x0, None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, None) @triton.jit def triton_red_fused_native_group_norm_15(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 32 r3 = rindex // 32 tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 262144 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 8192.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = xindex // 262144 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 8192.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_17(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-10 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None) @triton.jit def triton_poi_fused_add_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + x0, None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_19(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 
/ tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-10 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_20(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-10 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None) @triton.jit def triton_red_fused_native_group_norm_21(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 16 r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 131072 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_22(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 512 x2 = xindex // 131072 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 4096.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_23(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 512 rnumel = 4608 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 4608 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 4608.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 4608 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 4608 * x0), tmp12, rmask & xmask) @triton.jit def triton_per_fused_native_group_norm_24(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex % 16 r3 = rindex // 16 x0 = xindex % 32 x1 = xindex // 32 x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 32768 * x1), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + x4, tmp18, None) tl.store(out_ptr0 + x4, tmp8, None) tl.store(out_ptr1 + x4, tmp13, None) @triton.jit def triton_poi_fused_native_group_norm_relu_25(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 512 x2 = xindex // 32768 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1024.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_26(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 512 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 512, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = 
tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 512.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-10 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 512 * x0), tmp20, None) @triton.jit def triton_poi_fused_add_27(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = tl.load(in_out_ptr0 + x0, None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, None) @triton.jit def triton_red_fused_native_group_norm_28(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 64 r3 = rindex // 64 tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0 + 2048 * r3 + 131072 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_29(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 2048 x2 = xindex // 131072 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 64), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 64), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 4096.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_30(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 512 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = 
tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask & xmask) @triton.jit def triton_poi_fused_add_31(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + x0, None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_32(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy= 'evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, None) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy= 'evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_33(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 1024 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask & xmask) @triton.jit def triton_red_fused_native_group_norm_34(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 32 r3 = rindex // 32 tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 65536 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_35(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = xindex // 65536 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 2048.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_36(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 1024 rnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 9216 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 9216.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 9216 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 9216 * x0), tmp12, rmask & xmask) @triton.jit def triton_per_fused_native_group_norm_37(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex % 32 r3 = rindex // 32 x0 = xindex % 32 x1 = xindex // 32 x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 16384 * x1), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 512, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 512.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + x4, tmp18, None) tl.store(out_ptr0 + x4, tmp8, None) tl.store(out_ptr1 + x4, tmp13, None) @triton.jit def triton_poi_fused_native_group_norm_relu_38(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = xindex // 16384 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 512.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_39(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = 
tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-10 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None) @triton.jit def triton_poi_fused_add_40(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = tl.load(in_out_ptr0 + x0, None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, None) @triton.jit def triton_red_fused_native_group_norm_41(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 128 r3 = rindex // 128 tmp0 = tl.load(in_ptr0 + (r2 + 128 * x0 + 4096 * r3 + 65536 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_42(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = xindex // 65536 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 128), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 128), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 2048.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_43(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 1024 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = 
tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 4096 * x0), tmp12, rmask & xmask) @triton.jit def triton_poi_fused_add_44(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + x0, None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_45(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask, eviction_policy= 'evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, None) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask, eviction_policy= 'evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 4096 * x0), tmp12, rmask) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_46(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask, eviction_policy= 'evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, None) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask, eviction_policy= 'evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 4096 * x0), tmp12, rmask) @triton.jit def triton_per_fused_native_group_norm_47(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex % 64 r3 = rindex // 64 x0 = xindex % 32 x1 = xindex // 32 x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0 + 2048 * r3 + 32768 * x1), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + x4, tmp18, None) tl.store(out_ptr0 + x4, tmp8, None) tl.store(out_ptr1 + x4, tmp13, None) @triton.jit def 
triton_poi_fused_native_group_norm_relu_48(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 2048 x2 = xindex // 32768 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 64), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 64), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1024.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_49(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 18432 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 18432 * x0), rmask, eviction_policy= 'evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 18432.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, None) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 18432 * x0), rmask, eviction_policy ='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 18432 * x0), tmp12, rmask) @triton.jit def triton_per_fused_native_group_norm_50(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex % 64 r3 = rindex // 64 x0 = xindex % 32 x1 = xindex // 32 x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0 + 2048 * r3 + 8192 * x1), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + x4, tmp18, None) 
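    # The three stores below persist the per-(sample, group) statistics consumed by
    # the fused normalize+ReLU kernels that follow: out_ptr0 receives the mean
    # (tmp8), out_ptr1 the raw sum of squared deviations (tmp13, i.e. var * N
    # before the 1/N division), and out_ptr2 the reciprocal standard deviation
    # rsqrt(var + 1e-05) (tmp18). A minimal eager-mode sketch of the same
    # computation, assuming an input x of shape (N, C, H, W) split into 32 groups:
    #
    #   g = x.reshape(N, 32, -1)                          # flatten each group
    #   mean = g.mean(dim=2)                              # -> out_ptr0
    #   ssd = ((g - mean[..., None]) ** 2).sum(dim=2)     # -> out_ptr1
    #   rstd = (ssd / g.shape[2] + 1e-05).rsqrt()         # -> out_ptr2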
tl.store(out_ptr0 + x4, tmp8, None) tl.store(out_ptr1 + x4, tmp13, None) @triton.jit def triton_poi_fused_native_group_norm_relu_51(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 2048 x2 = xindex // 8192 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 64), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 64), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 256.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_52(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy= 'evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, None) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy= 'evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask) @triton.jit def triton_poi_fused_add_53(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = tl.load(in_out_ptr0 + x0, None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, None) @triton.jit def triton_per_fused_native_group_norm_54(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex % 256 r3 = rindex // 256 x0 = xindex % 32 x1 = xindex // 32 x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 256 * x0 + 8192 * r3 + 32768 * x1), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + x4, tmp18, None) tl.store(out_ptr0 + x4, tmp8, None) tl.store(out_ptr1 + x4, tmp13, None) @triton.jit def triton_poi_fused_native_group_norm_relu_55(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 8192 x2 = xindex // 32768 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 256), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 256), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1024.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_56(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = 
tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask, eviction_policy= 'evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 8192.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-10 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, None) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask, eviction_policy= 'evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 8192 * x0), tmp12, rmask) @triton.jit def triton_poi_fused_add_57(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + x0, None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, None) @triton.jit def triton_per_fused_native_group_norm_58(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex % 256 r3 = rindex // 256 x0 = xindex % 32 x1 = xindex // 32 x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 256 * x0 + 8192 * r3 + 32768 * x1), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp18, None) tl.store(out_ptr0 + x4, tmp8, None) @triton.jit def triton_poi_fused_mean_native_group_norm_relu_59(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8192 x1 = xindex // 8192 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 32768 * x1), None) tmp1 = tl.load(in_ptr1 + x2 // 256, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2 // 256, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (8192 + x0 + 32768 * x1), None) tmp18 = tl.load(in_ptr0 + (16384 + x0 + 32768 * x1), None) tmp25 = tl.load(in_ptr0 + (24576 + x0 + 32768 * x1), None) tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = 
tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = tmp11 - tmp1 tmp13 = tmp12 * tmp3 tmp14 = tmp13 * tmp5 tmp15 = tmp14 + tmp7 tmp16 = triton_helpers.maximum(tmp9, tmp15) tmp17 = tmp10 + tmp16 tmp19 = tmp18 - tmp1 tmp20 = tmp19 * tmp3 tmp21 = tmp20 * tmp5 tmp22 = tmp21 + tmp7 tmp23 = triton_helpers.maximum(tmp9, tmp22) tmp24 = tmp17 + tmp23 tmp26 = tmp25 - tmp1 tmp27 = tmp26 * tmp3 tmp28 = tmp27 * tmp5 tmp29 = tmp28 + tmp7 tmp30 = triton_helpers.maximum(tmp9, tmp29) tmp31 = tmp24 + tmp30 tmp32 = 4.0 tmp33 = tmp31 / tmp32 tl.store(out_ptr0 + x2, tmp33, None) @triton.jit def triton_poi_fused_convolution_60(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 87372 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 21843 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154) = args args.clear() assert_size_stride(primals_1, (256, 3, 7, 7), (147, 49, 7, 1)) assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_3, (256,), (1,)) assert_size_stride(primals_4, (256,), (1,)) assert_size_stride(primals_5, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_6, (256, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (256,), (1,)) assert_size_stride(primals_9, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_10, (256,), (1,)) assert_size_stride(primals_11, (256,), (1,)) 
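    # The asserts that follow pin down the same repeating parameter pattern for
    # every bottleneck block: a 1x1 reduce weight, a 3x3 weight, a 1x1 expand
    # weight, and a (gamma, beta) pair of per-channel group-norm affine vectors.
    # Before each convolution, the *_add_div_sqrt_sub_var_mean_* kernels above
    # standardize the filter over all of its elements per output channel
    # (Weight Standardization, eps = 1e-10). A hedged eager-mode equivalent for a
    # weight w of shape (C_out, C_in, kH, kW):
    #
    #   v = w.reshape(w.shape[0], -1)
    #   w_hat = (v - v.mean(dim=1, keepdim=True)) / \
    #           (v.var(dim=1, unbiased=False, keepdim=True) + 1e-10).sqrt()
    #   w_hat = w_hat.reshape_as(w)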
assert_size_stride(primals_12, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_13, (1024,), (1,)) assert_size_stride(primals_14, (1024,), (1,)) assert_size_stride(primals_15, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_16, (256,), (1,)) assert_size_stride(primals_17, (256,), (1,)) assert_size_stride(primals_18, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_19, (256,), (1,)) assert_size_stride(primals_20, (256,), (1,)) assert_size_stride(primals_21, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_22, (1024,), (1,)) assert_size_stride(primals_23, (1024,), (1,)) assert_size_stride(primals_24, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_25, (256,), (1,)) assert_size_stride(primals_26, (256,), (1,)) assert_size_stride(primals_27, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_28, (256,), (1,)) assert_size_stride(primals_29, (256,), (1,)) assert_size_stride(primals_30, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_31, (1024,), (1,)) assert_size_stride(primals_32, (1024,), (1,)) assert_size_stride(primals_33, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_34, (256,), (1,)) assert_size_stride(primals_35, (256,), (1,)) assert_size_stride(primals_36, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_37, (256,), (1,)) assert_size_stride(primals_38, (256,), (1,)) assert_size_stride(primals_39, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_40, (1024,), (1,)) assert_size_stride(primals_41, (1024,), (1,)) assert_size_stride(primals_42, (2048, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_43, (512, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_44, (512,), (1,)) assert_size_stride(primals_45, (512,), (1,)) assert_size_stride(primals_46, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_47, (512,), (1,)) assert_size_stride(primals_48, (512,), (1,)) assert_size_stride(primals_49, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_50, (2048,), (1,)) assert_size_stride(primals_51, (2048,), (1,)) assert_size_stride(primals_52, (512, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_53, (512,), (1,)) assert_size_stride(primals_54, (512,), (1,)) assert_size_stride(primals_55, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_56, (512,), (1,)) assert_size_stride(primals_57, (512,), (1,)) assert_size_stride(primals_58, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_59, (2048,), (1,)) assert_size_stride(primals_60, (2048,), (1,)) assert_size_stride(primals_61, (512, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_62, (512,), (1,)) assert_size_stride(primals_63, (512,), (1,)) assert_size_stride(primals_64, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_65, (512,), (1,)) assert_size_stride(primals_66, (512,), (1,)) assert_size_stride(primals_67, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_68, (2048,), (1,)) assert_size_stride(primals_69, (2048,), (1,)) assert_size_stride(primals_70, (512, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_71, (512,), (1,)) assert_size_stride(primals_72, (512,), (1,)) assert_size_stride(primals_73, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_74, (512,), (1,)) assert_size_stride(primals_75, (512,), (1,)) assert_size_stride(primals_76, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_77, (2048,), (1,)) assert_size_stride(primals_78, (2048,), (1,)) assert_size_stride(primals_79, 
(4096, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_80, (1024, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_81, (1024,), (1,)) assert_size_stride(primals_82, (1024,), (1,)) assert_size_stride(primals_83, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_84, (1024,), (1,)) assert_size_stride(primals_85, (1024,), (1,)) assert_size_stride(primals_86, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_87, (4096,), (1,)) assert_size_stride(primals_88, (4096,), (1,)) assert_size_stride(primals_89, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_90, (1024,), (1,)) assert_size_stride(primals_91, (1024,), (1,)) assert_size_stride(primals_92, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_93, (1024,), (1,)) assert_size_stride(primals_94, (1024,), (1,)) assert_size_stride(primals_95, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_96, (4096,), (1,)) assert_size_stride(primals_97, (4096,), (1,)) assert_size_stride(primals_98, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_99, (1024,), (1,)) assert_size_stride(primals_100, (1024,), (1,)) assert_size_stride(primals_101, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_102, (1024,), (1,)) assert_size_stride(primals_103, (1024,), (1,)) assert_size_stride(primals_104, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_105, (4096,), (1,)) assert_size_stride(primals_106, (4096,), (1,)) assert_size_stride(primals_107, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_108, (1024,), (1,)) assert_size_stride(primals_109, (1024,), (1,)) assert_size_stride(primals_110, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_111, (1024,), (1,)) assert_size_stride(primals_112, (1024,), (1,)) assert_size_stride(primals_113, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_114, (4096,), (1,)) assert_size_stride(primals_115, (4096,), (1,)) assert_size_stride(primals_116, (8192, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_117, (2048, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_118, (2048,), (1,)) assert_size_stride(primals_119, (2048,), (1,)) assert_size_stride(primals_120, (2048, 2048, 3, 3), (18432, 9, 3, 1)) assert_size_stride(primals_121, (2048,), (1,)) assert_size_stride(primals_122, (2048,), (1,)) assert_size_stride(primals_123, (8192, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_124, (8192,), (1,)) assert_size_stride(primals_125, (8192,), (1,)) assert_size_stride(primals_126, (2048, 8192, 1, 1), (8192, 1, 1, 1)) assert_size_stride(primals_127, (2048,), (1,)) assert_size_stride(primals_128, (2048,), (1,)) assert_size_stride(primals_129, (2048, 2048, 3, 3), (18432, 9, 3, 1)) assert_size_stride(primals_130, (2048,), (1,)) assert_size_stride(primals_131, (2048,), (1,)) assert_size_stride(primals_132, (8192, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_133, (8192,), (1,)) assert_size_stride(primals_134, (8192,), (1,)) assert_size_stride(primals_135, (2048, 8192, 1, 1), (8192, 1, 1, 1)) assert_size_stride(primals_136, (2048,), (1,)) assert_size_stride(primals_137, (2048,), (1,)) assert_size_stride(primals_138, (2048, 2048, 3, 3), (18432, 9, 3, 1)) assert_size_stride(primals_139, (2048,), (1,)) assert_size_stride(primals_140, (2048,), (1,)) assert_size_stride(primals_141, (8192, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_142, (8192,), (1,)) assert_size_stride(primals_143, (8192,), (1,)) assert_size_stride(primals_144, (2048, 
8192, 1, 1), (8192, 1, 1, 1)) assert_size_stride(primals_145, (2048,), (1,)) assert_size_stride(primals_146, (2048,), (1,)) assert_size_stride(primals_147, (2048, 2048, 3, 3), (18432, 9, 3, 1)) assert_size_stride(primals_148, (2048,), (1,)) assert_size_stride(primals_149, (2048,), (1,)) assert_size_stride(primals_150, (8192, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_151, (8192,), (1,)) assert_size_stride(primals_152, (8192,), (1,)) assert_size_stride(primals_153, (21843, 8192, 1, 1), (8192, 1, 1, 1)) assert_size_stride(primals_154, (21843,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch. float32) get_raw_stream(0) triton_poi_fused_0[grid(768, 49)](primals_1, buf0, 768, 49, XBLOCK= 32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_2, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_9, buf2, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_9 buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_18, buf3, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_18 buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_27, buf4, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_27 buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_36, buf5, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_36 buf6 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_3[grid(262144, 9)](primals_46, buf6, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_46 buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_3[grid(262144, 9)](primals_55, buf7, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_55 buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_3[grid(262144, 9)](primals_64, buf8, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_64 buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_3[grid(262144, 9)](primals_73, buf9, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_73 buf10 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024 ), torch.float32) triton_poi_fused_4[grid(1048576, 9)](primals_83, buf10, 1048576, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_83 buf11 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024 ), torch.float32) triton_poi_fused_4[grid(1048576, 9)](primals_92, buf11, 1048576, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_92 buf12 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024 ), torch.float32) triton_poi_fused_4[grid(1048576, 9)](primals_101, buf12, 1048576, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_101 buf13 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024 ), torch.float32) triton_poi_fused_4[grid(1048576, 9)](primals_110, buf13, 1048576, 9, XBLOCK=16, YBLOCK=64, 
num_warps=4, num_stages=1) del primals_110 buf14 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32) triton_poi_fused_5[grid(4194304, 9)](primals_120, buf14, 4194304, 9, XBLOCK=16, YBLOCK=128, num_warps=8, num_stages=1) del primals_120 buf15 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32) triton_poi_fused_5[grid(4194304, 9)](primals_129, buf15, 4194304, 9, XBLOCK=16, YBLOCK=128, num_warps=8, num_stages=1) del primals_129 buf16 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32) triton_poi_fused_5[grid(4194304, 9)](primals_138, buf16, 4194304, 9, XBLOCK=16, YBLOCK=128, num_warps=8, num_stages=1) del primals_138 buf17 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32) triton_poi_fused_5[grid(4194304, 9)](primals_147, buf17, 4194304, 9, XBLOCK=16, YBLOCK=128, num_warps=8, num_stages=1) del primals_147 buf19 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf21 = reinterpret_tensor(buf19, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf19 buf22 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch. float32) triton_per_fused_add_div_sqrt_sub_var_mean_6[grid(256)](buf21, buf0, buf22, 256, 147, XBLOCK=1, num_warps=2, num_stages=1) buf23 = extern_kernels.convolution(buf1, buf22, stride=(2, 2), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 256, 32, 32), (262144, 1, 8192, 256)) buf24 = empty_strided_cuda((4, 256, 34, 34), (295936, 1, 8704, 256), torch.float32) triton_poi_fused_constant_pad_nd_7[grid(1183744)](buf23, buf24, 1183744, XBLOCK=1024, num_warps=4, num_stages=1) buf25 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) buf26 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.int8) triton_poi_fused_max_pool2d_with_indices_8[grid(262144)](buf24, buf25, buf26, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf27 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf28 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf30 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. 
float32) triton_red_fused_native_group_norm_9[grid(128)](buf25, buf27, buf28, buf30, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1 ) buf31 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf25, buf27, buf28, primals_3, primals_4, buf31, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_4 buf33 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf35 = reinterpret_tensor(buf33, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf33 buf36 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(1024)](buf35, primals_5, buf36, 1024, 256, num_warps=2, num_stages=1) buf37 = extern_kernels.convolution(buf31, buf36, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf37, (4, 1024, 16, 16), (262144, 1, 16384, 1024)) buf39 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf41 = reinterpret_tensor(buf39, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf39 buf42 = empty_strided_cuda((256, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_12[grid(256)](buf41, primals_6, buf42, 256, 256, num_warps=2, num_stages=1) buf43 = extern_kernels.convolution(buf31, buf42, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf43, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf44 = buf28 del buf28 buf45 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf47 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) triton_red_fused_native_group_norm_9[grid(128)](buf43, buf44, buf45, buf47, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1 ) buf48 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf43, buf44, buf45, primals_7, primals_8, buf48, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_8 buf50 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf52 = reinterpret_tensor(buf50, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf50 buf53 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_13[grid(256)](buf52, buf2, buf53, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf54 = extern_kernels.convolution(buf48, buf53, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf54, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf55 = buf45 del buf45 buf56 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf58 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. 
float32) triton_red_fused_native_group_norm_9[grid(128)](buf54, buf55, buf56, buf58, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1 ) buf59 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf54, buf55, buf56, primals_10, primals_11, buf59, 262144, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_11 buf61 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf63 = reinterpret_tensor(buf61, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf61 buf64 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(1024)](buf63, primals_12, buf64, 1024, 256, num_warps=2, num_stages=1) buf65 = extern_kernels.convolution(buf59, buf64, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf65, (4, 1024, 16, 16), (262144, 1, 16384, 1024)) buf66 = buf37 del buf37 triton_poi_fused_add_14[grid(1048576)](buf66, buf65, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf67 = buf56 del buf56 buf68 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf70 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) triton_red_fused_native_group_norm_15[grid(128)](buf66, buf67, buf68, buf70, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf71 = buf65 del buf65 triton_poi_fused_native_group_norm_relu_16[grid(1048576)](buf66, buf67, buf68, primals_13, primals_14, buf71, 1048576, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_14 buf73 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf75 = reinterpret_tensor(buf73, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf73 buf76 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf75, primals_15, buf76, 256, 1024, num_warps=8, num_stages=1) buf77 = extern_kernels.convolution(buf71, buf76, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf77, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf78 = buf68 del buf68 buf79 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf81 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) triton_red_fused_native_group_norm_9[grid(128)](buf77, buf78, buf79, buf81, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1 ) buf82 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf77, buf78, buf79, primals_16, primals_17, buf82, 262144, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_17 buf84 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf86 = reinterpret_tensor(buf84, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf84 buf87 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_13[grid(256)](buf86, buf3, buf87, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf88 = extern_kernels.convolution(buf82, buf87, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf88, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf89 = buf79 del buf79 buf90 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. 
float32) buf92 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) triton_red_fused_native_group_norm_9[grid(128)](buf88, buf89, buf90, buf92, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1 ) buf93 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf88, buf89, buf90, primals_19, primals_20, buf93, 262144, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_20 buf95 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf97 = reinterpret_tensor(buf95, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf95 buf98 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(1024)](buf97, primals_21, buf98, 1024, 256, num_warps=2, num_stages=1) buf99 = extern_kernels.convolution(buf93, buf98, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf99, (4, 1024, 16, 16), (262144, 1, 16384, 1024)) buf100 = buf99 del buf99 triton_poi_fused_add_18[grid(1048576)](buf100, buf66, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf101 = buf90 del buf90 buf102 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf104 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_15[grid(128)](buf100, buf101, buf102, buf104, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf105 = reinterpret_tensor(buf23, (4, 1024, 16, 16), (262144, 1, 16384, 1024), 0) del buf23 triton_poi_fused_native_group_norm_relu_16[grid(1048576)](buf100, buf101, buf102, primals_22, primals_23, buf105, 1048576, XBLOCK =1024, num_warps=4, num_stages=1) del primals_23 buf107 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf109 = reinterpret_tensor(buf107, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf107 buf110 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024 ), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf109, primals_24, buf110, 256, 1024, num_warps=8, num_stages=1) buf111 = extern_kernels.convolution(buf105, buf110, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf111, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf112 = buf102 del buf102 buf113 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf115 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_9[grid(128)](buf111, buf112, buf113, buf115, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf116 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf111, buf112, buf113, primals_25, primals_26, buf116, 262144, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_26 buf118 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf120 = reinterpret_tensor(buf118, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf118 buf121 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_13[grid(256)](buf120, buf4, buf121, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf122 = extern_kernels.convolution(buf116, buf121, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf122, 
(4, 256, 16, 16), (65536, 1, 4096, 256)) buf123 = buf113 del buf113 buf124 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf126 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_9[grid(128)](buf122, buf123, buf124, buf126, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf127 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf122, buf123, buf124, primals_28, primals_29, buf127, 262144, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_29 buf129 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf131 = reinterpret_tensor(buf129, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf129 buf132 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(1024)](buf131, primals_30, buf132, 1024, 256, num_warps=2, num_stages=1) buf133 = extern_kernels.convolution(buf127, buf132, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf133, (4, 1024, 16, 16), (262144, 1, 16384, 1024)) buf134 = buf133 del buf133 triton_poi_fused_add_18[grid(1048576)](buf134, buf100, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf135 = buf124 del buf124 buf136 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf138 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_15[grid(128)](buf134, buf135, buf136, buf138, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf139 = empty_strided_cuda((4, 1024, 16, 16), (262144, 1, 16384, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_16[grid(1048576)](buf134, buf135, buf136, primals_31, primals_32, buf139, 1048576, XBLOCK =1024, num_warps=4, num_stages=1) del primals_32 buf141 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf143 = reinterpret_tensor(buf141, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf141 buf144 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024 ), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf143, primals_33, buf144, 256, 1024, num_warps=8, num_stages=1) buf145 = extern_kernels.convolution(buf139, buf144, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf145, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf146 = buf136 del buf136 buf147 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf149 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_9[grid(128)](buf145, buf146, buf147, buf149, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf150 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf145, buf146, buf147, primals_34, primals_35, buf150, 262144, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_35 buf152 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf154 = reinterpret_tensor(buf152, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf152 buf155 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_13[grid(256)](buf154, buf5, buf155, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf156 = 
extern_kernels.convolution(buf150, buf155, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf156, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf157 = buf147 del buf147 buf158 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf160 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_9[grid(128)](buf156, buf157, buf158, buf160, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf161 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf156, buf157, buf158, primals_37, primals_38, buf161, 262144, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_38 buf163 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf165 = reinterpret_tensor(buf163, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf163 buf166 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(1024)](buf165, primals_39, buf166, 1024, 256, num_warps=2, num_stages=1) buf167 = extern_kernels.convolution(buf161, buf166, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf167, (4, 1024, 16, 16), (262144, 1, 16384, 1024)) buf168 = buf167 del buf167 triton_poi_fused_add_18[grid(1048576)](buf168, buf134, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf169 = buf158 del buf158 buf170 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf172 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_15[grid(128)](buf168, buf169, buf170, buf172, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf173 = empty_strided_cuda((4, 1024, 16, 16), (262144, 1, 16384, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_16[grid(1048576)](buf168, buf169, buf170, primals_40, primals_41, buf173, 1048576, XBLOCK =1024, num_warps=4, num_stages=1) del primals_41 buf175 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf177 = reinterpret_tensor(buf175, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf175 buf178 = empty_strided_cuda((2048, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_19[grid(2048)](buf177, primals_42, buf178, 2048, 1024, num_warps=8, num_stages=1) buf179 = extern_kernels.convolution(buf173, buf178, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf179, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf181 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf183 = reinterpret_tensor(buf181, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf181 buf184 = empty_strided_cuda((512, 1024, 1, 1), (1024, 1, 1024, 1024 ), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_20[grid(512)](buf183, primals_43, buf184, 512, 1024, num_warps=8, num_stages=1) buf185 = extern_kernels.convolution(buf173, buf184, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf185, (4, 512, 16, 16), (131072, 1, 8192, 512)) buf186 = buf170 del buf170 buf187 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf189 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) 
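    # The same four-step pattern repeats around every convolution in this
    # generated graph: (1) a triton_*_add_div_sqrt_sub_var_mean_* kernel
    # standardizes the raw weight, (w - mean) / sqrt(var + 1e-10), into a
    # scratch buffer; (2) extern_kernels.convolution applies the
    # standardized weight; (3) a triton_red/per_fused_native_group_norm_*
    # reduction computes per-(sample, group) statistics over 32 groups; and
    # (4) a triton_poi_fused_native_group_norm_relu_* pointwise kernel
    # applies the affine GroupNorm transform fused with ReLU. This mirrors
    # the StdConv2d -> nn.GroupNorm -> nn.ReLU chain in the module
    # definitions after `call`.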
triton_red_fused_native_group_norm_21[grid(128)](buf185, buf186, buf187, buf189, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf190 = empty_strided_cuda((4, 512, 16, 16), (131072, 1, 8192, 512 ), torch.float32) triton_poi_fused_native_group_norm_relu_22[grid(524288)](buf185, buf186, buf187, primals_44, primals_45, buf190, 524288, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_45 buf192 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf194 = reinterpret_tensor(buf192, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf192 buf195 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_23[grid(512)](buf194, buf6, buf195, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf196 = extern_kernels.convolution(buf190, buf195, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf196, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf197 = buf187 del buf187 buf198 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf200 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_24[grid(128)](buf196, buf197, buf198, buf200, 128, 1024, num_warps=8, num_stages=1) buf201 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_25[grid(131072)](buf196, buf197, buf198, primals_47, primals_48, buf201, 131072, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_48 buf203 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf205 = reinterpret_tensor(buf203, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf203 buf206 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_26[grid(2048)](buf205, primals_49, buf206, 2048, 512, num_warps=4, num_stages=1) buf207 = extern_kernels.convolution(buf201, buf206, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf207, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf208 = buf179 del buf179 triton_poi_fused_add_27[grid(524288)](buf208, buf207, 524288, XBLOCK=512, num_warps=8, num_stages=1) buf209 = buf198 del buf198 buf210 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf212 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_28[grid(128)](buf208, buf209, buf210, buf212, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf213 = buf207 del buf207 triton_poi_fused_native_group_norm_relu_29[grid(524288)](buf208, buf209, buf210, primals_50, primals_51, buf213, 524288, XBLOCK= 512, num_warps=8, num_stages=1) del primals_51 buf215 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf217 = reinterpret_tensor(buf215, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf215 buf218 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048 ), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf217, primals_52, buf218, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps =16, num_stages=1) buf219 = extern_kernels.convolution(buf213, buf218, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf219, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf220 = buf210 del buf210 buf221 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch 
.float32) buf223 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_24[grid(128)](buf219, buf220, buf221, buf223, 128, 1024, num_warps=8, num_stages=1) buf224 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_25[grid(131072)](buf219, buf220, buf221, primals_53, primals_54, buf224, 131072, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_54 buf226 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf228 = reinterpret_tensor(buf226, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf226 buf229 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_23[grid(512)](buf228, buf7, buf229, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf230 = extern_kernels.convolution(buf224, buf229, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf230, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf231 = buf221 del buf221 buf232 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf234 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_24[grid(128)](buf230, buf231, buf232, buf234, 128, 1024, num_warps=8, num_stages=1) buf235 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_25[grid(131072)](buf230, buf231, buf232, primals_56, primals_57, buf235, 131072, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_57 buf237 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf239 = reinterpret_tensor(buf237, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf237 buf240 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_26[grid(2048)](buf239, primals_58, buf240, 2048, 512, num_warps=4, num_stages=1) buf241 = extern_kernels.convolution(buf235, buf240, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf241, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf242 = buf241 del buf241 triton_poi_fused_add_31[grid(524288)](buf242, buf208, 524288, XBLOCK=512, num_warps=8, num_stages=1) buf243 = buf232 del buf232 buf244 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf246 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_28[grid(128)](buf242, buf243, buf244, buf246, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf247 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) triton_poi_fused_native_group_norm_relu_29[grid(524288)](buf242, buf243, buf244, primals_59, primals_60, buf247, 524288, XBLOCK= 512, num_warps=8, num_stages=1) del primals_60 buf249 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf251 = reinterpret_tensor(buf249, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf249 buf252 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048 ), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf251, primals_61, buf252, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps =16, num_stages=1) buf253 = extern_kernels.convolution(buf247, buf252, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf253, (4, 512, 8, 8), 
(32768, 1, 4096, 512)) buf254 = buf244 del buf244 buf255 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf257 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_24[grid(128)](buf253, buf254, buf255, buf257, 128, 1024, num_warps=8, num_stages=1) buf258 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_25[grid(131072)](buf253, buf254, buf255, primals_62, primals_63, buf258, 131072, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_63 buf260 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf262 = reinterpret_tensor(buf260, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf260 buf263 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_23[grid(512)](buf262, buf8, buf263, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf264 = extern_kernels.convolution(buf258, buf263, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf264, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf265 = buf255 del buf255 buf266 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf268 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_24[grid(128)](buf264, buf265, buf266, buf268, 128, 1024, num_warps=8, num_stages=1) buf269 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_25[grid(131072)](buf264, buf265, buf266, primals_65, primals_66, buf269, 131072, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_66 buf271 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf273 = reinterpret_tensor(buf271, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf271 buf274 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_26[grid(2048)](buf273, primals_67, buf274, 2048, 512, num_warps=4, num_stages=1) buf275 = extern_kernels.convolution(buf269, buf274, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf275, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf276 = buf275 del buf275 triton_poi_fused_add_31[grid(524288)](buf276, buf242, 524288, XBLOCK=512, num_warps=8, num_stages=1) buf277 = buf266 del buf266 buf278 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf280 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_28[grid(128)](buf276, buf277, buf278, buf280, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf281 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) triton_poi_fused_native_group_norm_relu_29[grid(524288)](buf276, buf277, buf278, primals_68, primals_69, buf281, 524288, XBLOCK= 512, num_warps=8, num_stages=1) del primals_69 buf283 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf285 = reinterpret_tensor(buf283, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf283 buf286 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048 ), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf285, primals_70, buf286, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps =16, num_stages=1) buf287 = extern_kernels.convolution(buf281, buf286, stride=(1, 1), padding=(0, 0), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf287, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf288 = buf278 del buf278 buf289 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf291 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_24[grid(128)](buf287, buf288, buf289, buf291, 128, 1024, num_warps=8, num_stages=1) buf292 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_25[grid(131072)](buf287, buf288, buf289, primals_71, primals_72, buf292, 131072, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_72 buf294 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf296 = reinterpret_tensor(buf294, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf294 buf297 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_23[grid(512)](buf296, buf9, buf297, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf298 = extern_kernels.convolution(buf292, buf297, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf298, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf299 = buf289 del buf289 buf300 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf302 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_24[grid(128)](buf298, buf299, buf300, buf302, 128, 1024, num_warps=8, num_stages=1) buf303 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_25[grid(131072)](buf298, buf299, buf300, primals_74, primals_75, buf303, 131072, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_75 buf305 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf307 = reinterpret_tensor(buf305, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf305 buf308 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_26[grid(2048)](buf307, primals_76, buf308, 2048, 512, num_warps=4, num_stages=1) buf309 = extern_kernels.convolution(buf303, buf308, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf309, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf310 = buf309 del buf309 triton_poi_fused_add_31[grid(524288)](buf310, buf276, 524288, XBLOCK=512, num_warps=8, num_stages=1) buf311 = buf300 del buf300 buf312 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf314 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_28[grid(128)](buf310, buf311, buf312, buf314, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf315 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) triton_poi_fused_native_group_norm_relu_29[grid(524288)](buf310, buf311, buf312, primals_77, primals_78, buf315, 524288, XBLOCK= 512, num_warps=8, num_stages=1) del primals_78 buf317 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf319 = reinterpret_tensor(buf317, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf317 buf320 = empty_strided_cuda((4096, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_32[grid(4096)](buf319, primals_79, buf320, 4096, 2048, XBLOCK=1, 
RBLOCK=2048, num_warps=16, num_stages=1) buf321 = extern_kernels.convolution(buf315, buf320, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf321, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf323 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf325 = reinterpret_tensor(buf323, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf323 buf326 = empty_strided_cuda((1024, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_33[grid(1024)](buf325, primals_80, buf326, 1024, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf327 = extern_kernels.convolution(buf315, buf326, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf327, (4, 1024, 8, 8), (65536, 1, 8192, 1024)) buf328 = buf312 del buf312 buf329 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf331 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_34[grid(128)](buf327, buf328, buf329, buf331, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf332 = empty_strided_cuda((4, 1024, 8, 8), (65536, 1, 8192, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_35[grid(262144)](buf327, buf328, buf329, primals_81, primals_82, buf332, 262144, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_82 buf334 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf336 = reinterpret_tensor(buf334, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf334 buf337 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_36[grid(1024)](buf336, buf10, buf337, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf338 = extern_kernels.convolution(buf332, buf337, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf338, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf339 = buf329 del buf329 buf340 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf342 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_37[grid(128)](buf338, buf339, buf340, buf342, 128, 512, num_warps=4, num_stages=1) buf343 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_38[grid(65536)](buf338, buf339, buf340, primals_84, primals_85, buf343, 65536, XBLOCK= 256, num_warps=4, num_stages=1) del primals_85 buf345 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf347 = reinterpret_tensor(buf345, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf345 buf348 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_39[grid(4096)](buf347, primals_86, buf348, 4096, 1024, num_warps=8, num_stages=1) buf349 = extern_kernels.convolution(buf343, buf348, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf349, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf350 = buf321 del buf321 triton_poi_fused_add_40[grid(262144)](buf350, buf349, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf351 = buf340 del buf340 buf352 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf354 = empty_strided_cuda((4, 32, 1, 1), 
(32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_41[grid(128)](buf350, buf351, buf352, buf354, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf355 = buf349 del buf349 triton_poi_fused_native_group_norm_relu_42[grid(262144)](buf350, buf351, buf352, primals_87, primals_88, buf355, 262144, XBLOCK= 512, num_warps=8, num_stages=1) del primals_88 buf357 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf359 = reinterpret_tensor(buf357, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf357 buf360 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf359, primals_89, buf360, 1024, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf361 = extern_kernels.convolution(buf355, buf360, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf361, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf362 = buf352 del buf352 buf363 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf365 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_37[grid(128)](buf361, buf362, buf363, buf365, 128, 512, num_warps=4, num_stages=1) buf366 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_38[grid(65536)](buf361, buf362, buf363, primals_90, primals_91, buf366, 65536, XBLOCK= 256, num_warps=4, num_stages=1) del primals_91 buf368 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf370 = reinterpret_tensor(buf368, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf368 buf371 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_36[grid(1024)](buf370, buf11, buf371, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf372 = extern_kernels.convolution(buf366, buf371, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf372, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf373 = buf363 del buf363 buf374 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf376 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_37[grid(128)](buf372, buf373, buf374, buf376, 128, 512, num_warps=4, num_stages=1) buf377 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_38[grid(65536)](buf372, buf373, buf374, primals_93, primals_94, buf377, 65536, XBLOCK= 256, num_warps=4, num_stages=1) del primals_94 buf379 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf381 = reinterpret_tensor(buf379, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf379 buf382 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_39[grid(4096)](buf381, primals_95, buf382, 4096, 1024, num_warps=8, num_stages=1) buf383 = extern_kernels.convolution(buf377, buf382, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf383, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf384 = buf383 del buf383 triton_poi_fused_add_44[grid(262144)](buf384, buf350, 262144, XBLOCK=1024, num_warps=4, num_stages=1) buf385 = buf374 del buf374 buf386 = empty_strided_cuda((4, 32, 1, 
1), (32, 1, 128, 128), torch .float32) buf388 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_41[grid(128)](buf384, buf385, buf386, buf388, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf389 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096 ), torch.float32) triton_poi_fused_native_group_norm_relu_42[grid(262144)](buf384, buf385, buf386, primals_96, primals_97, buf389, 262144, XBLOCK= 512, num_warps=8, num_stages=1) del primals_97 buf391 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf393 = reinterpret_tensor(buf391, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf391 buf394 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf393, primals_98, buf394, 1024, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf395 = extern_kernels.convolution(buf389, buf394, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf395, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf396 = buf386 del buf386 buf397 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf399 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_37[grid(128)](buf395, buf396, buf397, buf399, 128, 512, num_warps=4, num_stages=1) buf400 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_38[grid(65536)](buf395, buf396, buf397, primals_99, primals_100, buf400, 65536, XBLOCK= 256, num_warps=4, num_stages=1) del primals_100 buf402 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf404 = reinterpret_tensor(buf402, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf402 buf405 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_36[grid(1024)](buf404, buf12, buf405, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf406 = extern_kernels.convolution(buf400, buf405, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf406, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf407 = buf397 del buf397 buf408 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf410 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_37[grid(128)](buf406, buf407, buf408, buf410, 128, 512, num_warps=4, num_stages=1) buf411 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_38[grid(65536)](buf406, buf407, buf408, primals_102, primals_103, buf411, 65536, XBLOCK =256, num_warps=4, num_stages=1) del primals_103 buf413 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf415 = reinterpret_tensor(buf413, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf413 buf416 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_39[grid(4096)](buf415, primals_104, buf416, 4096, 1024, num_warps=8, num_stages=1) buf417 = extern_kernels.convolution(buf411, buf416, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf417, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf418 = buf417 del buf417 
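    # Residual connections show up as in-place fused adds: buf418 aliases
    # the unit's conv3 output (buf417), and triton_poi_fused_add_44
    # accumulates the unit's input (buf384) into it, i.e. the
    # `out + residual` of PreActBottleneck.forward. The `del` statements
    # drop dead references so the allocator can reuse device memory for
    # later units.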
triton_poi_fused_add_44[grid(262144)](buf418, buf384, 262144, XBLOCK=1024, num_warps=4, num_stages=1) buf419 = buf408 del buf408 buf420 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf422 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_41[grid(128)](buf418, buf419, buf420, buf422, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf423 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096 ), torch.float32) triton_poi_fused_native_group_norm_relu_42[grid(262144)](buf418, buf419, buf420, primals_105, primals_106, buf423, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_106 buf425 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf427 = reinterpret_tensor(buf425, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf425 buf428 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf427, primals_107, buf428, 1024, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf429 = extern_kernels.convolution(buf423, buf428, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf429, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf430 = buf420 del buf420 buf431 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf433 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_37[grid(128)](buf429, buf430, buf431, buf433, 128, 512, num_warps=4, num_stages=1) buf434 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_38[grid(65536)](buf429, buf430, buf431, primals_108, primals_109, buf434, 65536, XBLOCK =256, num_warps=4, num_stages=1) del primals_109 buf436 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf438 = reinterpret_tensor(buf436, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf436 buf439 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_36[grid(1024)](buf438, buf13, buf439, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf440 = extern_kernels.convolution(buf434, buf439, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf440, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf441 = buf431 del buf431 buf442 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf444 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_37[grid(128)](buf440, buf441, buf442, buf444, 128, 512, num_warps=4, num_stages=1) buf445 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_38[grid(65536)](buf440, buf441, buf442, primals_111, primals_112, buf445, 65536, XBLOCK =256, num_warps=4, num_stages=1) del primals_112 buf447 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf449 = reinterpret_tensor(buf447, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf447 buf450 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_39[grid(4096)](buf449, primals_113, buf450, 4096, 1024, num_warps=8, num_stages=1) buf451 = extern_kernels.convolution(buf445, buf450, stride=(1, 1), padding=(0, 0), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf451, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf452 = buf451 del buf451 triton_poi_fused_add_44[grid(262144)](buf452, buf418, 262144, XBLOCK=1024, num_warps=4, num_stages=1) buf453 = buf442 del buf442 buf454 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf456 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_41[grid(128)](buf452, buf453, buf454, buf456, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf457 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096 ), torch.float32) triton_poi_fused_native_group_norm_relu_42[grid(262144)](buf452, buf453, buf454, primals_114, primals_115, buf457, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_115 buf459 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192), torch.float32) buf461 = reinterpret_tensor(buf459, (8192, 1, 1, 1), (1, 1, 1, 1), 0) del buf459 buf462 = empty_strided_cuda((8192, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_45[grid(8192)](buf461, primals_116, buf462, 8192, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf463 = extern_kernels.convolution(buf457, buf462, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf463, (4, 8192, 2, 2), (32768, 1, 16384, 8192)) buf465 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf467 = reinterpret_tensor(buf465, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf465 buf468 = empty_strided_cuda((2048, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_46[grid(2048)](buf467, primals_117, buf468, 2048, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf469 = extern_kernels.convolution(buf457, buf468, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf469, (4, 2048, 4, 4), (32768, 1, 8192, 2048)) buf470 = buf454 del buf454 buf471 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf473 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_47[grid(128)](buf469, buf470, buf471, buf473, 128, 1024, num_warps=8, num_stages=1) buf474 = empty_strided_cuda((4, 2048, 4, 4), (32768, 1, 8192, 2048), torch.float32) triton_poi_fused_native_group_norm_relu_48[grid(131072)](buf469, buf470, buf471, primals_118, primals_119, buf474, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_119 buf476 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf478 = reinterpret_tensor(buf476, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf476 buf479 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_49[grid(2048)](buf478, buf14, buf479, 2048, 18432, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf480 = extern_kernels.convolution(buf474, buf479, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf480, (4, 2048, 2, 2), (8192, 1, 4096, 2048)) buf481 = buf471 del buf471 buf482 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf484 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_50[grid(128)](buf480, buf481, 
buf482, buf484, 128, 256, num_warps=2, num_stages=1) buf485 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32) triton_poi_fused_native_group_norm_relu_51[grid(32768)](buf480, buf481, buf482, primals_121, primals_122, buf485, 32768, XBLOCK =128, num_warps=4, num_stages=1) del primals_122 buf487 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192), torch.float32) buf489 = reinterpret_tensor(buf487, (8192, 1, 1, 1), (1, 1, 1, 1), 0) del buf487 buf490 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_52[grid(8192)](buf489, primals_123, buf490, 8192, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf491 = extern_kernels.convolution(buf485, buf490, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf491, (4, 8192, 2, 2), (32768, 1, 16384, 8192)) buf492 = buf463 del buf463 triton_poi_fused_add_53[grid(131072)](buf492, buf491, 131072, XBLOCK=1024, num_warps=4, num_stages=1) buf493 = buf482 del buf482 buf494 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf496 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_54[grid(128)](buf492, buf493, buf494, buf496, 128, 1024, num_warps=8, num_stages=1) buf497 = buf491 del buf491 triton_poi_fused_native_group_norm_relu_55[grid(131072)](buf492, buf493, buf494, primals_124, primals_125, buf497, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_125 buf499 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf501 = reinterpret_tensor(buf499, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf499 buf502 = empty_strided_cuda((2048, 8192, 1, 1), (8192, 1, 8192, 8192), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_56[grid(2048)](buf501, primals_126, buf502, 2048, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf503 = extern_kernels.convolution(buf497, buf502, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf503, (4, 2048, 2, 2), (8192, 1, 4096, 2048)) buf504 = buf494 del buf494 buf505 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf507 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_50[grid(128)](buf503, buf504, buf505, buf507, 128, 256, num_warps=2, num_stages=1) buf508 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32) triton_poi_fused_native_group_norm_relu_51[grid(32768)](buf503, buf504, buf505, primals_127, primals_128, buf508, 32768, XBLOCK =128, num_warps=4, num_stages=1) del primals_128 buf510 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf512 = reinterpret_tensor(buf510, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf510 buf513 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_49[grid(2048)](buf512, buf15, buf513, 2048, 18432, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf514 = extern_kernels.convolution(buf508, buf513, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf514, (4, 2048, 2, 2), (8192, 1, 4096, 2048)) buf515 = buf505 del buf505 buf516 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf518 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 
128, 128), torch .float32) triton_per_fused_native_group_norm_50[grid(128)](buf514, buf515, buf516, buf518, 128, 256, num_warps=2, num_stages=1) buf519 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32) triton_poi_fused_native_group_norm_relu_51[grid(32768)](buf514, buf515, buf516, primals_130, primals_131, buf519, 32768, XBLOCK =128, num_warps=4, num_stages=1) del primals_131 buf521 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192), torch.float32) buf523 = reinterpret_tensor(buf521, (8192, 1, 1, 1), (1, 1, 1, 1), 0) del buf521 buf524 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_52[grid(8192)](buf523, primals_132, buf524, 8192, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf525 = extern_kernels.convolution(buf519, buf524, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf525, (4, 8192, 2, 2), (32768, 1, 16384, 8192)) buf526 = buf525 del buf525 triton_poi_fused_add_57[grid(131072)](buf526, buf492, 131072, XBLOCK=1024, num_warps=4, num_stages=1) buf527 = buf516 del buf516 buf528 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf530 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_54[grid(128)](buf526, buf527, buf528, buf530, 128, 1024, num_warps=8, num_stages=1) buf531 = empty_strided_cuda((4, 8192, 2, 2), (32768, 1, 16384, 8192 ), torch.float32) triton_poi_fused_native_group_norm_relu_55[grid(131072)](buf526, buf527, buf528, primals_133, primals_134, buf531, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_134 buf533 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf535 = reinterpret_tensor(buf533, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf533 buf536 = empty_strided_cuda((2048, 8192, 1, 1), (8192, 1, 8192, 8192), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_56[grid(2048)](buf535, primals_135, buf536, 2048, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf537 = extern_kernels.convolution(buf531, buf536, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf537, (4, 2048, 2, 2), (8192, 1, 4096, 2048)) buf538 = buf528 del buf528 buf539 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf541 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_50[grid(128)](buf537, buf538, buf539, buf541, 128, 256, num_warps=2, num_stages=1) buf542 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32) triton_poi_fused_native_group_norm_relu_51[grid(32768)](buf537, buf538, buf539, primals_136, primals_137, buf542, 32768, XBLOCK =128, num_warps=4, num_stages=1) del primals_137 buf544 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf546 = reinterpret_tensor(buf544, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf544 buf547 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_49[grid(2048)](buf546, buf16, buf547, 2048, 18432, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf548 = extern_kernels.convolution(buf542, buf547, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf548, (4, 2048, 2, 2), (8192, 1, 4096, 2048)) buf549 = 
buf539 del buf539 buf550 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf552 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_50[grid(128)](buf548, buf549, buf550, buf552, 128, 256, num_warps=2, num_stages=1) buf553 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32) triton_poi_fused_native_group_norm_relu_51[grid(32768)](buf548, buf549, buf550, primals_139, primals_140, buf553, 32768, XBLOCK =128, num_warps=4, num_stages=1) del primals_140 buf555 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192), torch.float32) buf557 = reinterpret_tensor(buf555, (8192, 1, 1, 1), (1, 1, 1, 1), 0) del buf555 buf558 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_52[grid(8192)](buf557, primals_141, buf558, 8192, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf559 = extern_kernels.convolution(buf553, buf558, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf559, (4, 8192, 2, 2), (32768, 1, 16384, 8192)) buf560 = buf559 del buf559 triton_poi_fused_add_57[grid(131072)](buf560, buf526, 131072, XBLOCK=1024, num_warps=4, num_stages=1) buf561 = buf550 del buf550 buf562 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf564 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_54[grid(128)](buf560, buf561, buf562, buf564, 128, 1024, num_warps=8, num_stages=1) buf565 = empty_strided_cuda((4, 8192, 2, 2), (32768, 1, 16384, 8192 ), torch.float32) triton_poi_fused_native_group_norm_relu_55[grid(131072)](buf560, buf561, buf562, primals_142, primals_143, buf565, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_143 buf567 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf569 = reinterpret_tensor(buf567, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf567 buf570 = empty_strided_cuda((2048, 8192, 1, 1), (8192, 1, 8192, 8192), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_56[grid(2048)](buf569, primals_144, buf570, 2048, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf571 = extern_kernels.convolution(buf565, buf570, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf571, (4, 2048, 2, 2), (8192, 1, 4096, 2048)) buf572 = buf562 del buf562 buf573 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf575 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_50[grid(128)](buf571, buf572, buf573, buf575, 128, 256, num_warps=2, num_stages=1) buf576 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32) triton_poi_fused_native_group_norm_relu_51[grid(32768)](buf571, buf572, buf573, primals_145, primals_146, buf576, 32768, XBLOCK =128, num_warps=4, num_stages=1) del primals_146 buf578 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf580 = reinterpret_tensor(buf578, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf578 buf581 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_49[grid(2048)](buf580, buf17, buf581, 2048, 18432, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf582 = extern_kernels.convolution(buf576, buf581, stride=(1, 1), padding=(1, 1), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf582, (4, 2048, 2, 2), (8192, 1, 4096, 2048)) buf583 = buf573 del buf573 buf584 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf586 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_50[grid(128)](buf582, buf583, buf584, buf586, 128, 256, num_warps=2, num_stages=1) buf587 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32) triton_poi_fused_native_group_norm_relu_51[grid(32768)](buf582, buf583, buf584, primals_148, primals_149, buf587, 32768, XBLOCK =128, num_warps=4, num_stages=1) del primals_149 buf589 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192), torch.float32) buf591 = reinterpret_tensor(buf589, (8192, 1, 1, 1), (1, 1, 1, 1), 0) del buf589 buf592 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_52[grid(8192)](buf591, primals_150, buf592, 8192, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf593 = extern_kernels.convolution(buf587, buf592, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf593, (4, 8192, 2, 2), (32768, 1, 16384, 8192)) buf594 = buf593 del buf593 triton_poi_fused_add_57[grid(131072)](buf594, buf560, 131072, XBLOCK=1024, num_warps=4, num_stages=1) buf595 = reinterpret_tensor(buf584, (4, 32, 1, 1), (32, 1, 32, 32), 0) del buf584 buf596 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf598 = reinterpret_tensor(buf596, (4, 32, 1, 1), (32, 1, 32, 32), 0) del buf596 triton_per_fused_native_group_norm_58[grid(128)](buf598, buf594, buf595, 128, 1024, num_warps=8, num_stages=1) buf599 = empty_strided_cuda((4, 8192, 1, 1), (8192, 1, 8192, 8192), torch.float32) triton_poi_fused_mean_native_group_norm_relu_59[grid(32768)](buf594, buf595, buf598, primals_151, primals_152, buf599, 32768, XBLOCK =256, num_warps=4, num_stages=1) buf600 = extern_kernels.convolution(buf599, primals_153, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf600, (4, 21843, 1, 1), (21843, 1, 21843, 21843)) buf601 = reinterpret_tensor(buf600, (4, 21843, 1, 1), (21843, 1, 87372, 87372), 0) del buf600 triton_poi_fused_convolution_60[grid(87372)](buf601, primals_154, 87372, XBLOCK=512, num_warps=8, num_stages=1) del primals_154 return (reinterpret_tensor(buf601, (4, 21843), (21843, 1), 0), buf0, buf1, primals_3, primals_5, primals_6, primals_7, buf2, primals_10, primals_12, primals_13, primals_15, primals_16, buf3, primals_19, primals_21, primals_22, primals_24, primals_25, buf4, primals_28, primals_30, primals_31, primals_33, primals_34, buf5, primals_37, primals_39, primals_40, primals_42, primals_43, primals_44, buf6, primals_47, primals_49, primals_50, primals_52, primals_53, buf7, primals_56, primals_58, primals_59, primals_61, primals_62, buf8, primals_65, primals_67, primals_68, primals_70, primals_71, buf9, primals_74, primals_76, primals_77, primals_79, primals_80, primals_81, buf10, primals_84, primals_86, primals_87, primals_89, primals_90, buf11, primals_93, primals_95, primals_96, primals_98, primals_99, buf12, primals_102, primals_104, primals_105, primals_107, primals_108, buf13, primals_111, primals_113, primals_114, primals_116, primals_117, primals_118, buf14, primals_121, primals_123, primals_124, primals_126, 
primals_127, buf15, primals_130, primals_132, primals_133, primals_135, primals_136, buf16, primals_139, primals_141, primals_142, primals_144, primals_145, buf17, primals_148, primals_150, primals_151, primals_152, primals_153, buf21, buf22, buf24, buf25, buf26, reinterpret_tensor(buf27, (4, 32), (32, 1), 0), reinterpret_tensor(buf30, (4, 32), (32, 1), 0), buf31, buf35, buf36, buf41, buf42, buf43, reinterpret_tensor(buf44, (4, 32), (32, 1), 0), reinterpret_tensor(buf47, (4, 32), (32, 1), 0), buf48, buf52, buf53, buf54, reinterpret_tensor(buf55, (4, 32), (32, 1), 0), reinterpret_tensor(buf58, (4, 32), (32, 1), 0), buf59, buf63, buf64, buf66, reinterpret_tensor(buf67, (4, 32), (32, 1), 0), reinterpret_tensor(buf70, (4, 32), (32, 1), 0), buf71, buf75, buf76, buf77, reinterpret_tensor(buf78, (4, 32), (32, 1), 0), reinterpret_tensor(buf81, (4, 32), (32, 1), 0), buf82, buf86, buf87, buf88, reinterpret_tensor(buf89, (4, 32), (32, 1), 0), reinterpret_tensor(buf92, (4, 32), (32, 1), 0), buf93, buf97, buf98, buf100, reinterpret_tensor(buf101, (4, 32), (32, 1), 0), reinterpret_tensor(buf104, (4, 32), (32, 1), 0), buf105, buf109, buf110, buf111, reinterpret_tensor(buf112, (4, 32), (32, 1), 0), reinterpret_tensor(buf115, (4, 32), (32, 1), 0), buf116, buf120, buf121, buf122, reinterpret_tensor(buf123, (4, 32), (32, 1), 0), reinterpret_tensor(buf126, (4, 32), (32, 1), 0), buf127, buf131, buf132, buf134, reinterpret_tensor(buf135, (4, 32), (32, 1), 0), reinterpret_tensor(buf138, (4, 32), (32, 1), 0), buf139, buf143, buf144, buf145, reinterpret_tensor(buf146, (4, 32), (32, 1), 0), reinterpret_tensor(buf149, (4, 32), (32, 1), 0), buf150, buf154, buf155, buf156, reinterpret_tensor(buf157, (4, 32), (32, 1), 0), reinterpret_tensor(buf160, (4, 32), (32, 1), 0), buf161, buf165, buf166, buf168, reinterpret_tensor(buf169, (4, 32), (32, 1), 0), reinterpret_tensor(buf172, (4, 32), (32, 1), 0), buf173, buf177, buf178, buf183, buf184, buf185, reinterpret_tensor(buf186, (4, 32), (32, 1), 0), reinterpret_tensor(buf189, (4, 32), (32, 1), 0), buf190, buf194, buf195, buf196, reinterpret_tensor(buf197, (4, 32), (32, 1), 0), reinterpret_tensor(buf200, (4, 32), (32, 1), 0), buf201, buf205, buf206, buf208, reinterpret_tensor(buf209, (4, 32), (32, 1), 0), reinterpret_tensor(buf212, (4, 32), (32, 1), 0), buf213, buf217, buf218, buf219, reinterpret_tensor(buf220, (4, 32), (32, 1), 0), reinterpret_tensor(buf223, (4, 32), (32, 1), 0), buf224, buf228, buf229, buf230, reinterpret_tensor(buf231, (4, 32), (32, 1), 0), reinterpret_tensor(buf234, (4, 32), (32, 1), 0), buf235, buf239, buf240, buf242, reinterpret_tensor(buf243, (4, 32), (32, 1), 0), reinterpret_tensor(buf246, (4, 32), (32, 1), 0), buf247, buf251, buf252, buf253, reinterpret_tensor(buf254, (4, 32), (32, 1), 0), reinterpret_tensor(buf257, (4, 32), (32, 1), 0), buf258, buf262, buf263, buf264, reinterpret_tensor(buf265, (4, 32), (32, 1), 0), reinterpret_tensor(buf268, (4, 32), (32, 1), 0), buf269, buf273, buf274, buf276, reinterpret_tensor(buf277, (4, 32), (32, 1), 0), reinterpret_tensor(buf280, (4, 32), (32, 1), 0), buf281, buf285, buf286, buf287, reinterpret_tensor(buf288, (4, 32), (32, 1), 0), reinterpret_tensor(buf291, (4, 32), (32, 1), 0), buf292, buf296, buf297, buf298, reinterpret_tensor(buf299, (4, 32), (32, 1), 0), reinterpret_tensor(buf302, (4, 32), (32, 1), 0), buf303, buf307, buf308, buf310, reinterpret_tensor(buf311, (4, 32), (32, 1), 0), reinterpret_tensor(buf314, (4, 32), (32, 1), 0), buf315, buf319, buf320, buf325, buf326, buf327, reinterpret_tensor( buf328, 
        (4, 32), (32, 1), 0), reinterpret_tensor(buf331, (4, 32), (32, 1), 0),
        buf332, buf336, buf337, buf338,
        reinterpret_tensor(buf339, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf342, (4, 32), (32, 1), 0),
        buf343, buf347, buf348, buf350,
        reinterpret_tensor(buf351, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf354, (4, 32), (32, 1), 0),
        buf355, buf359, buf360, buf361,
        reinterpret_tensor(buf362, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf365, (4, 32), (32, 1), 0),
        buf366, buf370, buf371, buf372,
        reinterpret_tensor(buf373, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf376, (4, 32), (32, 1), 0),
        buf377, buf381, buf382, buf384,
        reinterpret_tensor(buf385, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf388, (4, 32), (32, 1), 0),
        buf389, buf393, buf394, buf395,
        reinterpret_tensor(buf396, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf399, (4, 32), (32, 1), 0),
        buf400, buf404, buf405, buf406,
        reinterpret_tensor(buf407, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf410, (4, 32), (32, 1), 0),
        buf411, buf415, buf416, buf418,
        reinterpret_tensor(buf419, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf422, (4, 32), (32, 1), 0),
        buf423, buf427, buf428, buf429,
        reinterpret_tensor(buf430, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf433, (4, 32), (32, 1), 0),
        buf434, buf438, buf439, buf440,
        reinterpret_tensor(buf441, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf444, (4, 32), (32, 1), 0),
        buf445, buf449, buf450, buf452,
        reinterpret_tensor(buf453, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf456, (4, 32), (32, 1), 0),
        buf457, buf461, buf462, buf467, buf468, buf469,
        reinterpret_tensor(buf470, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf473, (4, 32), (32, 1), 0),
        buf474, buf478, buf479, buf480,
        reinterpret_tensor(buf481, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf484, (4, 32), (32, 1), 0),
        buf485, buf489, buf490, buf492,
        reinterpret_tensor(buf493, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf496, (4, 32), (32, 1), 0),
        buf497, buf501, buf502, buf503,
        reinterpret_tensor(buf504, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf507, (4, 32), (32, 1), 0),
        buf508, buf512, buf513, buf514,
        reinterpret_tensor(buf515, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf518, (4, 32), (32, 1), 0),
        buf519, buf523, buf524, buf526,
        reinterpret_tensor(buf527, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf530, (4, 32), (32, 1), 0),
        buf531, buf535, buf536, buf537,
        reinterpret_tensor(buf538, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf541, (4, 32), (32, 1), 0),
        buf542, buf546, buf547, buf548,
        reinterpret_tensor(buf549, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf552, (4, 32), (32, 1), 0),
        buf553, buf557, buf558, buf560,
        reinterpret_tensor(buf561, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf564, (4, 32), (32, 1), 0),
        buf565, buf569, buf570, buf571,
        reinterpret_tensor(buf572, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf575, (4, 32), (32, 1), 0),
        buf576, buf580, buf581, buf582,
        reinterpret_tensor(buf583, (4, 32), (32, 1), 0),
        reinterpret_tensor(buf586, (4, 32), (32, 1), 0),
        buf587, buf591, buf592, buf594, buf595, buf598, buf599)


# nn, F and OrderedDict are required by the module definitions below; these
# imports may duplicate ones earlier in the file, which is harmless.
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict


def conv3x3(cin, cout, stride=1, groups=1, bias=False):
    return StdConv2d(cin, cout, kernel_size=3, stride=stride, padding=1,
        bias=bias, groups=groups)


def conv1x1(cin, cout, stride=1, bias=False):
    return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0,
        bias=bias)


def tf2th(conv_weights):
    """Possibly convert HWIO to OIHW."""
    if conv_weights.ndim == 4:
        conv_weights = conv_weights.transpose([3, 2, 0, 1])
    return torch.from_numpy(conv_weights)


class StdConv2d(nn.Conv2d):
    """Conv2d with weight standardization: each filter is normalized to
    zero mean and unit variance before the convolution is applied."""

    def forward(self, x):
        w = self.weight
        v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
        w = (w - m) / torch.sqrt(v + 1e-10)
        return F.conv2d(x, w, self.bias, self.stride, self.padding,
            self.dilation, self.groups)
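# Illustrative sketch only (not part of the compiled module): the fused
# add_div_sqrt_sub_var_mean kernels in `call` above materialize the same
# standardized weight that StdConv2d.forward computes on the fly, so a
# manual standardization followed by a plain conv2d matches StdConv2d.
if __name__ == '__main__':
    _conv = StdConv2d(8, 16, kernel_size=3, padding=1, bias=False)
    _x = torch.randn(2, 8, 5, 5)
    _v, _m = torch.var_mean(_conv.weight, dim=[1, 2, 3], keepdim=True,
        unbiased=False)
    _w_std = (_conv.weight - _m) / torch.sqrt(_v + 1e-10)
    assert torch.allclose(_conv(_x), F.conv2d(_x, _w_std, None, padding=1),
        atol=1e-6)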
self.weight v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False) w = (w - m) / torch.sqrt(v + 1e-10) return F.conv2d(x, w, self.bias, self.stride, self.padding, self. dilation, self.groups) class PreActBottleneck(nn.Module): """Pre-activation (v2) bottleneck block. Follows the implementation of "Identity Mappings in Deep Residual Networks": https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua Except it puts the stride on 3x3 conv when available. """ def __init__(self, cin, cout=None, cmid=None, stride=1): super().__init__() cout = cout or cin cmid = cmid or cout // 4 self.gn1 = nn.GroupNorm(32, cin) self.conv1 = conv1x1(cin, cmid) self.gn2 = nn.GroupNorm(32, cmid) self.conv2 = conv3x3(cmid, cmid, stride) self.gn3 = nn.GroupNorm(32, cmid) self.conv3 = conv1x1(cmid, cout) self.relu = nn.ReLU(inplace=True) if stride != 1 or cin != cout: self.downsample = conv1x1(cin, cout, stride) def forward(self, x): out = self.relu(self.gn1(x)) residual = x if hasattr(self, 'downsample'): residual = self.downsample(out) out = self.conv1(out) out = self.conv2(self.relu(self.gn2(out))) out = self.conv3(self.relu(self.gn3(out))) return out + residual def load_from(self, weights, prefix=''): convname = 'standardized_conv2d' with torch.no_grad(): self.conv1.weight.copy_(tf2th(weights[ f'{prefix}a/{convname}/kernel'])) self.conv2.weight.copy_(tf2th(weights[ f'{prefix}b/{convname}/kernel'])) self.conv3.weight.copy_(tf2th(weights[ f'{prefix}c/{convname}/kernel'])) self.gn1.weight.copy_(tf2th(weights[f'{prefix}a/group_norm/gamma']) ) self.gn2.weight.copy_(tf2th(weights[f'{prefix}b/group_norm/gamma']) ) self.gn3.weight.copy_(tf2th(weights[f'{prefix}c/group_norm/gamma']) ) self.gn1.bias.copy_(tf2th(weights[f'{prefix}a/group_norm/beta'])) self.gn2.bias.copy_(tf2th(weights[f'{prefix}b/group_norm/beta'])) self.gn3.bias.copy_(tf2th(weights[f'{prefix}c/group_norm/beta'])) if hasattr(self, 'downsample'): w = weights[f'{prefix}a/proj/{convname}/kernel'] self.downsample.weight.copy_(tf2th(w)) class ResNetV2New(nn.Module): """Implementation of Pre-activation (v2) ResNet mode.""" def __init__(self, block_units, width_factor, head_size=21843, zero_head=False): super().__init__() wf = width_factor self.root = nn.Sequential(OrderedDict([('conv', StdConv2d(3, 64 * wf, kernel_size=7, stride=2, padding=3, bias=False)), ('pad', nn.ConstantPad2d(1, 0)), ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0))])) self.body = nn.Sequential(OrderedDict([('block1', nn.Sequential( OrderedDict([('unit01', PreActBottleneck(cin=64 * wf, cout=256 * wf, cmid=64 * wf))] + [(f'unit{i:02d}', PreActBottleneck(cin= 256 * wf, cout=256 * wf, cmid=64 * wf)) for i in range(2, block_units[0] + 1)]))), ('block2', nn.Sequential(OrderedDict([ ('unit01', PreActBottleneck(cin=256 * wf, cout=512 * wf, cmid= 128 * wf, stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin= 512 * wf, cout=512 * wf, cmid=128 * wf)) for i in range(2, block_units[1] + 1)]))), ('block3', nn.Sequential(OrderedDict([ ('unit01', PreActBottleneck(cin=512 * wf, cout=1024 * wf, cmid= 256 * wf, stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin= 1024 * wf, cout=1024 * wf, cmid=256 * wf)) for i in range(2, block_units[2] + 1)]))), ('block4', nn.Sequential(OrderedDict([ ('unit01', PreActBottleneck(cin=1024 * wf, cout=2048 * wf, cmid =512 * wf, stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin =2048 * wf, cout=2048 * wf, cmid=512 * wf)) for i in range(2, block_units[3] + 1)])))])) self.zero_head = zero_head self.head = 
nn.Sequential(OrderedDict([('gn', nn.GroupNorm(32, 2048 * wf)), ('relu', nn.ReLU(inplace=True)), ('avg', nn. AdaptiveAvgPool2d(output_size=1)), ('conv', nn.Conv2d(2048 * wf, head_size, kernel_size=1, bias=True))])) def load_from(self, weights, prefix='resnet/'): with torch.no_grad(): self.root.conv.weight.copy_(tf2th(weights[ f'{prefix}root_block/standardized_conv2d/kernel'])) self.head.gn.weight.copy_(tf2th(weights[ f'{prefix}group_norm/gamma'])) self.head.gn.bias.copy_(tf2th(weights[f'{prefix}group_norm/beta'])) if self.zero_head: nn.init.zeros_(self.head.conv.weight) nn.init.zeros_(self.head.conv.bias) else: self.head.conv.weight.copy_(tf2th(weights[ f'{prefix}head/conv2d/kernel'])) self.head.conv.bias.copy_(tf2th(weights[ f'{prefix}head/conv2d/bias'])) for bname, block in self.body.named_children(): for uname, unit in block.named_children(): unit.load_from(weights, prefix=f'{prefix}{bname}/{uname}/') def forward(self, input_0): primals_1 = self.root.conv.weight primals_3 = self.body.block1.unit01.gn1.weight primals_4 = self.body.block1.unit01.gn1.bias primals_6 = self.body.block1.unit01.conv1.weight primals_7 = self.body.block1.unit01.gn2.weight primals_8 = self.body.block1.unit01.gn2.bias primals_9 = self.body.block1.unit01.conv2.weight primals_10 = self.body.block1.unit01.gn3.weight primals_11 = self.body.block1.unit01.gn3.bias primals_5 = self.body.block1.unit01.conv3.weight primals_12 = self.body.block1.unit01.downsample.weight primals_13 = self.body.block1.unit02.gn1.weight primals_14 = self.body.block1.unit02.gn1.bias primals_15 = self.body.block1.unit02.conv1.weight primals_16 = self.body.block1.unit02.gn2.weight primals_17 = self.body.block1.unit02.gn2.bias primals_18 = self.body.block1.unit02.conv2.weight primals_19 = self.body.block1.unit02.gn3.weight primals_20 = self.body.block1.unit02.gn3.bias primals_21 = self.body.block1.unit02.conv3.weight primals_22 = self.body.block1.unit03.gn1.weight primals_23 = self.body.block1.unit03.gn1.bias primals_24 = self.body.block1.unit03.conv1.weight primals_25 = self.body.block1.unit03.gn2.weight primals_26 = self.body.block1.unit03.gn2.bias primals_27 = self.body.block1.unit03.conv2.weight primals_28 = self.body.block1.unit03.gn3.weight primals_29 = self.body.block1.unit03.gn3.bias primals_30 = self.body.block1.unit03.conv3.weight primals_31 = self.body.block1.unit04.gn1.weight primals_32 = self.body.block1.unit04.gn1.bias primals_33 = self.body.block1.unit04.conv1.weight primals_34 = self.body.block1.unit04.gn2.weight primals_35 = self.body.block1.unit04.gn2.bias primals_36 = self.body.block1.unit04.conv2.weight primals_37 = self.body.block1.unit04.gn3.weight primals_38 = self.body.block1.unit04.gn3.bias primals_39 = self.body.block1.unit04.conv3.weight primals_40 = self.body.block2.unit01.gn1.weight primals_41 = self.body.block2.unit01.gn1.bias primals_43 = self.body.block2.unit01.conv1.weight primals_44 = self.body.block2.unit01.gn2.weight primals_45 = self.body.block2.unit01.gn2.bias primals_46 = self.body.block2.unit01.conv2.weight primals_47 = self.body.block2.unit01.gn3.weight primals_48 = self.body.block2.unit01.gn3.bias primals_49 = self.body.block2.unit01.conv3.weight primals_42 = self.body.block2.unit01.downsample.weight primals_50 = self.body.block2.unit02.gn1.weight primals_51 = self.body.block2.unit02.gn1.bias primals_52 = self.body.block2.unit02.conv1.weight primals_53 = self.body.block2.unit02.gn2.weight primals_54 = self.body.block2.unit02.gn2.bias primals_55 = self.body.block2.unit02.conv2.weight primals_56 = 
self.body.block2.unit02.gn3.weight primals_57 = self.body.block2.unit02.gn3.bias primals_58 = self.body.block2.unit02.conv3.weight primals_59 = self.body.block2.unit03.gn1.weight primals_60 = self.body.block2.unit03.gn1.bias primals_61 = self.body.block2.unit03.conv1.weight primals_62 = self.body.block2.unit03.gn2.weight primals_63 = self.body.block2.unit03.gn2.bias primals_64 = self.body.block2.unit03.conv2.weight primals_65 = self.body.block2.unit03.gn3.weight primals_66 = self.body.block2.unit03.gn3.bias primals_67 = self.body.block2.unit03.conv3.weight primals_68 = self.body.block2.unit04.gn1.weight primals_69 = self.body.block2.unit04.gn1.bias primals_70 = self.body.block2.unit04.conv1.weight primals_71 = self.body.block2.unit04.gn2.weight primals_72 = self.body.block2.unit04.gn2.bias primals_73 = self.body.block2.unit04.conv2.weight primals_74 = self.body.block2.unit04.gn3.weight primals_75 = self.body.block2.unit04.gn3.bias primals_76 = self.body.block2.unit04.conv3.weight primals_77 = self.body.block3.unit01.gn1.weight primals_78 = self.body.block3.unit01.gn1.bias primals_80 = self.body.block3.unit01.conv1.weight primals_81 = self.body.block3.unit01.gn2.weight primals_82 = self.body.block3.unit01.gn2.bias primals_83 = self.body.block3.unit01.conv2.weight primals_84 = self.body.block3.unit01.gn3.weight primals_85 = self.body.block3.unit01.gn3.bias primals_86 = self.body.block3.unit01.conv3.weight primals_79 = self.body.block3.unit01.downsample.weight primals_87 = self.body.block3.unit02.gn1.weight primals_88 = self.body.block3.unit02.gn1.bias primals_89 = self.body.block3.unit02.conv1.weight primals_90 = self.body.block3.unit02.gn2.weight primals_91 = self.body.block3.unit02.gn2.bias primals_92 = self.body.block3.unit02.conv2.weight primals_93 = self.body.block3.unit02.gn3.weight primals_94 = self.body.block3.unit02.gn3.bias primals_95 = self.body.block3.unit02.conv3.weight primals_96 = self.body.block3.unit03.gn1.weight primals_97 = self.body.block3.unit03.gn1.bias primals_98 = self.body.block3.unit03.conv1.weight primals_99 = self.body.block3.unit03.gn2.weight primals_100 = self.body.block3.unit03.gn2.bias primals_101 = self.body.block3.unit03.conv2.weight primals_102 = self.body.block3.unit03.gn3.weight primals_103 = self.body.block3.unit03.gn3.bias primals_104 = self.body.block3.unit03.conv3.weight primals_105 = self.body.block3.unit04.gn1.weight primals_106 = self.body.block3.unit04.gn1.bias primals_107 = self.body.block3.unit04.conv1.weight primals_108 = self.body.block3.unit04.gn2.weight primals_109 = self.body.block3.unit04.gn2.bias primals_110 = self.body.block3.unit04.conv2.weight primals_111 = self.body.block3.unit04.gn3.weight primals_112 = self.body.block3.unit04.gn3.bias primals_113 = self.body.block3.unit04.conv3.weight primals_114 = self.body.block4.unit01.gn1.weight primals_115 = self.body.block4.unit01.gn1.bias primals_117 = self.body.block4.unit01.conv1.weight primals_118 = self.body.block4.unit01.gn2.weight primals_119 = self.body.block4.unit01.gn2.bias primals_120 = self.body.block4.unit01.conv2.weight primals_121 = self.body.block4.unit01.gn3.weight primals_122 = self.body.block4.unit01.gn3.bias primals_123 = self.body.block4.unit01.conv3.weight primals_116 = self.body.block4.unit01.downsample.weight primals_124 = self.body.block4.unit02.gn1.weight primals_125 = self.body.block4.unit02.gn1.bias primals_126 = self.body.block4.unit02.conv1.weight primals_127 = self.body.block4.unit02.gn2.weight primals_128 = self.body.block4.unit02.gn2.bias primals_129 = 
self.body.block4.unit02.conv2.weight primals_130 = self.body.block4.unit02.gn3.weight primals_131 = self.body.block4.unit02.gn3.bias primals_132 = self.body.block4.unit02.conv3.weight primals_133 = self.body.block4.unit03.gn1.weight primals_134 = self.body.block4.unit03.gn1.bias primals_135 = self.body.block4.unit03.conv1.weight primals_136 = self.body.block4.unit03.gn2.weight primals_137 = self.body.block4.unit03.gn2.bias primals_138 = self.body.block4.unit03.conv2.weight primals_139 = self.body.block4.unit03.gn3.weight primals_140 = self.body.block4.unit03.gn3.bias primals_141 = self.body.block4.unit03.conv3.weight primals_142 = self.body.block4.unit04.gn1.weight primals_143 = self.body.block4.unit04.gn1.bias primals_144 = self.body.block4.unit04.conv1.weight primals_145 = self.body.block4.unit04.gn2.weight primals_146 = self.body.block4.unit04.gn2.bias primals_147 = self.body.block4.unit04.conv2.weight primals_148 = self.body.block4.unit04.gn3.weight primals_149 = self.body.block4.unit04.gn3.bias primals_150 = self.body.block4.unit04.conv3.weight primals_151 = self.head.gn.weight primals_152 = self.head.gn.bias primals_153 = self.head.conv.weight primals_154 = self.head.conv.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154]) return output[0]
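The StdConv2d layer defined above implements Weight Standardization: before every convolution it re-normalizes each output filter to zero mean and unit variance over its input-channel and spatial dimensions (dims [1, 2, 3]), with a 1e-10 epsilon for stability. A minimal, self-contained sketch of the same computation in plain PyTorch, checking the per-filter statistics (illustrative only, not part of the entry above):

import torch
import torch.nn as nn
import torch.nn.functional as F

class StdConv2d(nn.Conv2d):
    # Standardize each filter over its (in_channels, kH, kW) dims before
    # convolving, exactly as in the forward() above.
    def forward(self, x):
        w = self.weight
        v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
        w = (w - m) / torch.sqrt(v + 1e-10)
        return F.conv2d(x, w, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)

conv = StdConv2d(3, 8, kernel_size=3, padding=1, bias=False)
v, m = torch.var_mean(conv.weight, dim=[1, 2, 3], keepdim=True, unbiased=False)
w_std = (conv.weight - m) / torch.sqrt(v + 1e-10)
print(w_std.mean(dim=[1, 2, 3]))                 # ~0 for each of the 8 filters
print(w_std.var(dim=[1, 2, 3], unbiased=False))  # ~1 for each of the 8 filters
print(conv(torch.randn(1, 3, 32, 32)).shape)     # torch.Size([1, 8, 32, 32])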
RicJM/weighted_c2d
ResNetV2
false
15389
[ "MIT" ]
49
38053869b77c1544349c53ba6f3c1325254aa413
https://github.com/RicJM/weighted_c2d/tree/38053869b77c1544349c53ba6f3c1325254aa413
Capsule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/2v/c2vvufq37bmgefm4ulrxp3p5xqkijxqqy4mfnu6hyyswltlccjyr.py # Topologically Sorted Source Nodes: [b, c], Original ATen: [aten.zeros_like, aten._softmax] # Source node to ATen node mapping: # b => full_default # c => amax, div, exp, sub, sum_1 # Graph fragment: # %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%full_default, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%full_default, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_zeros_like_0 = async_compile.triton('triton_poi_fused__softmax_zeros_like_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_zeros_like_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': 
True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_zeros_like_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tmp1 = tl_math.exp(tmp0) tmp2 = tmp1 + tmp1 tmp3 = tmp2 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = tmp1 / tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ah/cahcbdgzcypclgrmenrcgftl53kemvcm53v6yoxzwdqjyblrincb.py # Topologically Sorted Source Nodes: [v], Original ATen: [aten.clone] # Source node to ATen node mapping: # v => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_4,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask) tl.store(out_ptr0 + (x4), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bx/cbxiguk7akpsnxphmhksnygtcqzhzbhxjtnwtcoz74eely3k7dv6.py # Topologically Sorted Source Nodes: [x_norm, mag, truediv, mul, add, out], Original ATen: [aten.linalg_vector_norm, aten.pow, aten.div, aten.mul, aten.add] # Source node to ATen node mapping: # add => add # mag => pow_3 # mul => mul # out => div_2 # truediv => div_1 # x_norm => pow_1, pow_2, sum_2 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_6, 2), kwargs = {}) # %sum_2 : [num_users=1] = 
call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %pow_3 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_2, 2), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_6, %pow_2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %pow_3), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_3, 1), kwargs = {}) # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add), kwargs = {}) triton_poi_fused_add_div_linalg_vector_norm_mul_pow_2 = async_compile.triton('triton_poi_fused_add_div_linalg_vector_norm_mul_pow_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_linalg_vector_norm_mul_pow_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_linalg_vector_norm_mul_pow_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tmp14 = tmp12 * tmp12 tmp15 = tmp13 * tmp14 tmp16 = 1.0 tmp17 = tmp14 + tmp16 tmp18 = tmp15 / tmp17 tl.store(out_ptr0 + (x2), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/2s/c2s3zo6qtbodb6bdwv46ozxj4nxxymp76igm7emvdafvrj3673sn.py # Topologically Sorted Source Nodes: [b_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # b_1 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = 
call_function[target=torch.ops.aten.clone.default](args = (%permute_9,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = (yindex // 16) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ue/cuejnjfin2toe55demka6k23rwkmjoo3bhbrujl4vsplhq5qsjow.py # Topologically Sorted Source Nodes: [c_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # c_1 => amax_1, exp_1, sub_1 # Graph fragment: # %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_10, [1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_10, %amax_1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/5l/c5lhvbzqt26cvji7ae3ignfy7lym2byxmpvr2n6f2tboe4hpbwcv.py # Topologically Sorted Source Nodes: [c_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # c_1 => div_3, sum_3 # Graph fragment: # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {}) # %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_3), kwargs = {}) triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4, 16), (64, 16, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [u], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (16, 1), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [b, c], Original ATen: [aten.zeros_like, aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_zeros_like_0.run(buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [v], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf0, buf2, 256, grid=grid(256), stream=stream0) buf3 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [v], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_norm, mag, truediv, mul, add, out], Original ATen: [aten.linalg_vector_norm, aten.pow, aten.div, aten.mul, aten.add] triton_poi_fused_add_div_linalg_vector_norm_mul_pow_2.run(buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [b_1], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf0, buf5, 64, 4, grid=grid(64, 4), stream=stream0) del buf0 buf6 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [b_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf4, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [c_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf6, buf7, 64, grid=grid(64), stream=stream0) buf8 = reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [c_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_5.run(buf7, buf8, 64, grid=grid(64), stream=stream0) buf9 = reinterpret_tensor(buf7, (16, 1, 4), (4, 4, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [v_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf8, (16, 1, 
4), (4, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_norm_1, mag_1, truediv_2, mul_1, add_1, out_1], Original ATen: [aten.linalg_vector_norm, aten.pow, aten.div, aten.mul, aten.add] triton_poi_fused_add_div_linalg_vector_norm_mul_pow_2.run(buf9, buf10, 64, grid=grid(64), stream=stream0) buf11 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [b_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf10, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf11) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [c_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf11, buf12, 64, grid=grid(64), stream=stream0) buf13 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [c_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_5.run(buf12, buf13, 64, grid=grid(64), stream=stream0) buf14 = reinterpret_tensor(buf12, (16, 1, 4), (4, 4, 1), 0); del buf12 # reuse # Topologically Sorted Source Nodes: [v_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf13, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf14) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_norm_2, mag_2, truediv_4, mul_2, add_2, out_2], Original ATen: [aten.linalg_vector_norm, aten.pow, aten.div, aten.mul, aten.add] triton_poi_fused_add_div_linalg_vector_norm_mul_pow_2.run(buf14, buf15, 64, grid=grid(64), stream=stream0) buf16 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [b_3], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf15, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf16) buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [c_3], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf16, buf17, 64, grid=grid(64), stream=stream0) buf18 = reinterpret_tensor(buf16, (4, 4, 4), (16, 4, 1), 0); del buf16 # reuse # Topologically Sorted Source Nodes: [c_3], Original ATen: [aten._softmax] triton_poi_fused__softmax_5.run(buf17, buf18, 64, grid=grid(64), stream=stream0) buf19 = reinterpret_tensor(buf17, (16, 1, 4), (4, 4, 1), 0); del buf17 # reuse # Topologically Sorted Source Nodes: [v_3], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf18, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf19) buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_norm_3, mag_3, truediv_6, mul_3, add_3, out_3], Original ATen: [aten.linalg_vector_norm, aten.pow, aten.div, aten.mul, aten.add] triton_poi_fused_add_div_linalg_vector_norm_mul_pow_2.run(buf19, buf20, 64, grid=grid(64), stream=stream0) return (buf20, buf3, buf4, buf8, buf9, buf10, buf13, buf14, buf15, buf18, buf19, reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf5, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import 
print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4, 16), (64, 16, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
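Note that triton_poi_fused__softmax_zeros_like_0 above takes no input pointer: because the routing logits b start out as zeros (the [b, c] source nodes map to aten.zeros_like followed by aten._softmax), Inductor constant-folds the first iteration's softmax into the uniform value exp(0) / (4 * exp(0)) = 0.25 and simply fills the buffer. A quick eager-mode check of the same fact (illustrative):

import torch
b = torch.zeros(4, 4, 4)
c = torch.softmax(b, dim=1)  # softmax over the 4 output capsules
assert torch.allclose(c, torch.full_like(c, 0.25))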
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class Capsule(nn.Module): def __init__(self, cfg): super(Capsule, self).__init__() self.input_dim_capsule = cfg.input_dim_capsule self.dim_capsule = cfg.dim_capsule self.num_capsule = cfg.num_capsule self.batch_size = cfg.batch_size self.share_weights = cfg.share_weights self.num_iterations = cfg.num_iterations if self.share_weights: W = torch.zeros(1, self.input_dim_capsule, self.num_capsule * self.dim_capsule) else: W = torch.zeros(self.batch_size, self.input_dim_capsule, self.num_capsule * self.dim_capsule) W = nn.init.xavier_normal_(W) self.W = nn.Parameter(W) def forward(self, x): """ x: [B, L, H] # features from a CNN / RNN; L serves as input_num_capsules, H as input_dim_capsule """ B, I, _ = x.size() O, F = self.num_capsule, self.dim_capsule u = torch.matmul(x, self.W) u = u.view(B, I, O, F).transpose(1, 2) b = torch.zeros_like(u[:, :, :, 0]) for i in range(self.num_iterations): c = torch.softmax(b, dim=1) v = torch.einsum('boi,boif->bof', [c, u]) v = self.squash(v) b = torch.einsum('bof,boif->boi', [v, u]) return v @staticmethod def squash(x: 'torch.Tensor'): x_norm = x.norm(p=2, dim=-1, keepdim=True) mag = x_norm ** 2 out = x / x_norm * mag / (1 + mag) return out def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'cfg': _mock_config(input_dim_capsule=4, dim_capsule=4, num_capsule=4, batch_size=4, share_weights=4, num_iterations=4)}]
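The squash nonlinearity above keeps each capsule vector's direction and rescales its length to ||x||^2 / (1 + ||x||^2), so every output norm lands in [0, 1). A short numeric check of that identity (illustrative; like the original, the division is undefined for an exactly-zero vector):

import torch
x = torch.randn(4, 4, 4)
x_norm = x.norm(p=2, dim=-1, keepdim=True)
out = x / x_norm * x_norm ** 2 / (1 + x_norm ** 2)
expected_len = (x_norm ** 2 / (1 + x_norm ** 2)).squeeze(-1)
assert torch.allclose(out.norm(p=2, dim=-1), expected_len, atol=1e-6)
assert (out.norm(p=2, dim=-1) < 1).all()  # squashed lengths stay below 1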
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_zeros_like_0(out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tmp1 = tl_math.exp(tmp0) tmp2 = tmp1 + tmp1 tmp3 = tmp2 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = tmp1 / tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_add_div_linalg_vector_norm_mul_pow_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tmp14 = tmp12 * tmp12 tmp15 = tmp13 * tmp14 tmp16 = 1.0 tmp17 = tmp14 + tmp16 tmp18 = tmp15 / tmp17 tl.store(out_ptr0 + x2, tmp18, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4, 16), (64, 16, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (16, 1), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_zeros_like_0[grid(64)](buf1, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(256)](buf0, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_linalg_vector_norm_mul_pow_2[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(64, 4)](buf0, buf5, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del buf0 buf6 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 
4), (16, 4, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(64)](buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0) del buf6 triton_poi_fused__softmax_5[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf7, (16, 1, 4), (4, 4, 1), 0) del buf7 extern_kernels.bmm(reinterpret_tensor(buf8, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_linalg_vector_norm_mul_pow_2[grid(64)](buf9, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf10, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf11) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(64)](buf11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0) del buf11 triton_poi_fused__softmax_5[grid(64)](buf12, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) buf14 = reinterpret_tensor(buf12, (16, 1, 4), (4, 4, 1), 0) del buf12 extern_kernels.bmm(reinterpret_tensor(buf13, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf14) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_linalg_vector_norm_mul_pow_2[grid(64)](buf14, buf15, 64, XBLOCK=64, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf15, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf16) buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(64)](buf16, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) buf18 = reinterpret_tensor(buf16, (4, 4, 4), (16, 4, 1), 0) del buf16 triton_poi_fused__softmax_5[grid(64)](buf17, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) buf19 = reinterpret_tensor(buf17, (16, 1, 4), (4, 4, 1), 0) del buf17 extern_kernels.bmm(reinterpret_tensor(buf18, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf19) buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_linalg_vector_norm_mul_pow_2[grid(64)](buf19, buf20, 64, XBLOCK=64, num_warps=1, num_stages=1) return (buf20, buf3, buf4, buf8, buf9, buf10, buf13, buf14, buf15, buf18, buf19, reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf5, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0)) class CapsuleNew(nn.Module): def __init__(self, cfg): super(CapsuleNew, self).__init__() self.input_dim_capsule = cfg.input_dim_capsule self.dim_capsule = cfg.dim_capsule self.num_capsule = cfg.num_capsule self.batch_size = cfg.batch_size self.share_weights = cfg.share_weights self.num_iterations = cfg.num_iterations if self.share_weights: W = torch.zeros(1, self.input_dim_capsule, self.num_capsule * self.dim_capsule) else: W = torch.zeros(self.batch_size, self.input_dim_capsule, self. 
num_capsule * self.dim_capsule) W = nn.init.xavier_normal_(W) self.W = nn.Parameter(W) @staticmethod def squash(x: 'torch.Tensor'): x_norm = x.norm(p=2, dim=-1, keepdim=True) mag = x_norm ** 2 out = x / x_norm * mag / (1 + mag) return out def forward(self, input_0): primals_2 = self.W primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
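To sanity-check the compiled CapsuleNew against the eager Capsule, the two can share the same routing weight and be compared on identical inputs. A hedged sketch, assuming both class definitions above are in scope, a CUDA device is available, and using types.SimpleNamespace as a stand-in for the paritybench _mock_config helper:

import types
import torch

cfg = types.SimpleNamespace(input_dim_capsule=4, dim_capsule=4, num_capsule=4,
                            batch_size=4, share_weights=True, num_iterations=4)
eager = Capsule(cfg).cuda()
compiled = CapsuleNew(cfg).cuda()
compiled.W = eager.W  # share the (1, 4, 16) routing weight so outputs match

x = torch.rand(4, 4, 4, device='cuda')  # shapes fixed by the asserts in call()
assert torch.allclose(eager(x), compiled(x), atol=1e-5)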
fmc123653/DeepKE
Capsule
false
15390
[ "MIT" ]
676
4d30e51368681c7cb73e2ecacf9b922b441cbe99
https://github.com/fmc123653/DeepKE/tree/4d30e51368681c7cb73e2ecacf9b922b441cbe99
BalancedLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/r5/cr572xaijqfuet7ihewbrvzmu7izr57vbycuj3ty743zbl7fw3t6.py # Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, weight, pos_mask, sum_1, pos_num, truediv, setitem, neg_mask, sum_2, neg_num, truediv_1, mul, setitem_1, sum_3, weight_1], Original ATen: [aten.binary_cross_entropy_with_logits, aten.zeros, aten.eq, aten.sum, aten._to_copy, aten.reciprocal, aten.mul, aten.index_put, aten.div] # Source node to ATen node mapping: # binary_cross_entropy_with_logits => abs_1, exp, full_default_1, log1p, minimum, mul_3, mul_4, neg, sub, sub_1, sub_2, sum_4 # mul => mul_2 # neg_mask => eq_1 # neg_num => convert_element_type_1 # pos_mask => eq # pos_num => convert_element_type # setitem => index_put # setitem_1 => index_put_1 # sum_1 => sum_1 # sum_2 => sum_2 # sum_3 => sum_3 # truediv => mul, reciprocal # truediv_1 => mul_1, reciprocal_1 # weight => full_default # weight_1 => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default_1, %arg1_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, %sub_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = 
{dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %eq : [num_users=2] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg0_1, 0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%eq,), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sum_1, torch.float32), kwargs = {}) # %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%convert_element_type,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1), kwargs = {}) # %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%full_default, [%eq], %mul), kwargs = {}) # %eq_1 : [num_users=2] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg0_1, 1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%eq_1,), kwargs = {}) # %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sum_2, torch.float32), kwargs = {}) # %reciprocal_1 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%convert_element_type_1,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_1, 1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, 1.0), kwargs = {}) # %index_put_1 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put, [%eq_1], %mul_2), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%index_put_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%index_put_1, %sum_3), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %div), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_4,), kwargs = {}) triton_per_fused__to_copy_binary_cross_entropy_with_logits_div_eq_index_put_mul_reciprocal_sum_zeros_0 = async_compile.triton('triton_per_fused__to_copy_binary_cross_entropy_with_logits_div_eq_index_put_mul_reciprocal_sum_zeros_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_binary_cross_entropy_with_logits_div_eq_index_put_mul_reciprocal_sum_zeros_0', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__to_copy_binary_cross_entropy_with_logits_div_eq_index_put_mul_reciprocal_sum_zeros_0(in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp27 = tl.load(in_ptr1 + (r0), None) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp3 = tmp2.to(tl.int64) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 1.0 tmp8 = tmp0 == tmp7 tmp9 = tmp8.to(tl.int64) tmp10 = tl.broadcast_to(tmp9, [RBLOCK]) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0)) tmp13 = tmp6.to(tl.float32) tmp14 = tl.full([1], 1, tl.int32) tmp15 = tmp14 / tmp13 tmp16 = tmp15 * tmp7 tmp17 = tl.where(tmp2, tmp16, tmp1) tmp18 = tmp12.to(tl.float32) tmp19 = tmp14 / tmp18 tmp20 = tmp19 * tmp7 tmp21 = tmp20 * tmp7 tmp22 = tl.where(tmp8, tmp21, tmp17) tmp23 = tl.broadcast_to(tmp22, [RBLOCK]) tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0)) tmp26 = tmp7 - tmp0 tmp28 = tmp26 * tmp27 tmp29 = triton_helpers.minimum(tmp1, tmp27) tmp30 = tl_math.abs(tmp27) tmp31 = -tmp30 tmp32 = tl_math.exp(tmp31) tmp33 = libdevice.log1p(tmp32) tmp34 = tmp29 - tmp33 tmp35 = tmp28 - tmp34 tmp36 = tmp22 / tmp25 tmp37 = tmp35 * tmp36 tmp38 = tl.broadcast_to(tmp37, [RBLOCK]) tmp40 = triton_helpers.promote_to_tensor(tl.sum(tmp38, 0)) tl.store(in_out_ptr1 + (tl.full([1], 0, tl.int32)), tmp40, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, weight, pos_mask, sum_1, pos_num, truediv, setitem, neg_mask, sum_2, neg_num, truediv_1, mul, setitem_1, sum_3, weight_1], Original ATen: [aten.binary_cross_entropy_with_logits, aten.zeros, aten.eq, aten.sum, aten._to_copy, aten.reciprocal, aten.mul, aten.index_put, aten.div] stream0 = get_raw_stream(0) triton_per_fused__to_copy_binary_cross_entropy_with_logits_div_eq_index_put_mul_reciprocal_sum_zeros_0.run(buf5, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.functional as F class BalancedLoss(nn.Module): def __init__(self, neg_weight=1.0): super(BalancedLoss, self).__init__() self.neg_weight = neg_weight def forward(self, input, target): pos_mask = target == 0 neg_mask = target == 1 pos_num = pos_mask.sum().float() neg_num = neg_mask.sum().float() weight = torch.zeros(target.size(), dtype=torch.float32, device= target.device) weight[pos_mask] = 1 / pos_num weight[neg_mask] = 1 / neg_num * self.neg_weight weight /= weight.sum() return F.binary_cross_entropy_with_logits(input, target.float(), weight, reduction='sum') def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused__to_copy_binary_cross_entropy_with_logits_div_eq_index_put_mul_reciprocal_sum_zeros_0( in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp27 = tl.load(in_ptr1 + r0, None) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp3 = tmp2.to(tl.int64) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 1.0 tmp8 = tmp0 == tmp7 tmp9 = tmp8.to(tl.int64) tmp10 = tl.broadcast_to(tmp9, [RBLOCK]) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0)) tmp13 = tmp6.to(tl.float32) tmp14 = tl.full([1], 1, tl.int32) tmp15 = tmp14 / tmp13 tmp16 = tmp15 * tmp7 tmp17 = tl.where(tmp2, tmp16, tmp1) tmp18 = tmp12.to(tl.float32) tmp19 = tmp14 / tmp18 tmp20 = tmp19 * tmp7 tmp21 = tmp20 * tmp7 tmp22 = tl.where(tmp8, tmp21, tmp17) tmp23 = tl.broadcast_to(tmp22, [RBLOCK]) tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0)) tmp26 = tmp7 - tmp0 tmp28 = tmp26 * tmp27 tmp29 = triton_helpers.minimum(tmp1, tmp27) tmp30 = tl_math.abs(tmp27) tmp31 = -tmp30 tmp32 = tl_math.exp(tmp31) tmp33 = libdevice.log1p(tmp32) tmp34 = tmp29 - tmp33 tmp35 = tmp28 - tmp34 tmp36 = tmp22 / tmp25 tmp37 = tmp35 * tmp36 tmp38 = tl.broadcast_to(tmp37, [RBLOCK]) tmp40 = triton_helpers.promote_to_tensor(tl.sum(tmp38, 0)) tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp40, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4 del buf4 get_raw_stream(0) triton_per_fused__to_copy_binary_cross_entropy_with_logits_div_eq_index_put_mul_reciprocal_sum_zeros_0[ grid(1)](buf5, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf5, class BalancedLossNew(nn.Module): def __init__(self, neg_weight=1.0): super(BalancedLossNew, self).__init__() self.neg_weight = neg_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
gabrielsluz/vince
BalancedLoss
false
15,391
[ "Apache-2.0" ]
61
f4e17a2cf70c080a7e01e46d15537e33224c869b
https://github.com/gabrielsluz/vince/tree/f4e17a2cf70c080a7e01e46d15537e33224c869b
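The BalancedLoss record above fuses the class-balanced weighting and the numerically stable BCE-with-logits into a single reduction kernel: per element it evaluates (1 - t) * x - (min(0, x) - log1p(exp(-|x|))), multiplies by the normalized weight, and sums. A minimal CPU sketch of that identity, independent of the compiled kernel, assuming hard 0/1 targets (the record's get_inputs draws targets from torch.rand, under which both masks are almost surely empty and the weight normalization divides by zero); all variable names below are illustrative, and note the record calls the target == 0 mask "pos":

import torch
import torch.nn.functional as F

torch.manual_seed(0)
x = torch.randn(4, 4, 4, 4)                     # logits
t = (torch.rand(4, 4, 4, 4) > 0.5).float()      # hard 0/1 targets
w = torch.zeros_like(t)
w[t == 0] = 1.0 / (t == 0).sum()                # the record's "pos" mask is target == 0
w[t == 1] = 1.0 / (t == 1).sum()                # neg_weight = 1.0
w = w / w.sum()

ref = F.binary_cross_entropy_with_logits(x, t, w, reduction='sum')
# stable per-element form computed inside the fused kernel:
#   (1 - t) * x - (min(0, x) - log1p(exp(-|x|)))
manual = ((1 - t) * x - (torch.clamp(x, max=0.0) - torch.log1p(torch.exp(-x.abs())))) * w
assert torch.allclose(ref, manual.sum(), atol=1e-5)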
GAE
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/md/cmd3ewacyhu5w5hausgbjbmtnt5rr66cgczh4ibdypq7dz6p4v7g.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = 
xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py # Topologically Sorted Source Nodes: [policy], Original ATen: [aten._softmax] # Source node to ATen node mapping: # policy => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py # Topologically Sorted Source Nodes: [policy], Original ATen: [aten._softmax] # Source node to ATen node mapping: # policy => div, sum_1 # Graph fragment: # %sum_1 : 
[num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (1, 128), (128, 1)) assert_size_stride(primals_7, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse buf7 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = 
get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 8192, grid=grid(8192), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [policy], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [policy], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0) del buf3 buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [value], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf6) del primals_7 return (buf4, reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), buf4, primals_6, primals_4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class GAE(nn.Module): def __init__(self, num_inputs, num_outputs): super(GAE, self).__init__() self.num_inputs = num_inputs self.num_outputs = num_outputs self.fc = nn.Linear(num_inputs, 128) self.fc_actor = nn.Linear(128, num_outputs) self.fc_critic = nn.Linear(128, 1) for m in self.modules(): if isinstance(m, nn.Linear): nn.init.xavier_uniform(m.weight) def forward(self, input): x = F.relu(self.fc(input)) policy = F.softmax(self.fc_actor(x)) value = self.fc_critic(x) return policy, value @classmethod def get_gae(self, values, rewards, masks): returns = torch.zeros_like(rewards) advantages = torch.zeros_like(rewards) running_return = 0 previous_value = 0 running_advantage = 0 for t in reversed(range(len(rewards))): running_return = rewards[t] + gamma * running_return * masks[t] running_tderror = rewards[t] + gamma * previous_value * masks[t ] - values.data[t] running_advantage = (running_tderror + gamma * lambda_gae * running_advantage * masks[t]) returns[t] = running_return previous_value = values.data[t] advantages[t] = running_advantage return returns, advantages @classmethod def train_model(cls, net, transitions, optimizer): states, actions, rewards, masks = (transitions.state, transitions. action, transitions.reward, transitions.mask) states = torch.stack(states) actions = torch.stack(actions) rewards = torch.Tensor(rewards) masks = torch.Tensor(masks) policies, values = net(states) policies = policies.view(-1, net.num_outputs) values = values.view(-1) returns, advantages = net.get_gae(values.view(-1).detach(), rewards, masks) log_policies = (torch.log(policies) * actions.detach()).sum(dim=1) actor_loss = -(log_policies * advantages).sum() critic_loss = (returns.detach() - values).pow(2).sum() entropy = (torch.log(policies) * policies).sum(1).sum() loss = (actor_loss + critic_coefficient * critic_loss - entropy_coefficient * entropy) optimizer.zero_grad() loss.backward() optimizer.step() return loss def get_action(self, input): policy, _ = self.forward(input) policy = policy[0].data.numpy() action = np.random.choice(self.num_outputs, 1, p=policy)[0] return action def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_inputs': 4, 'num_outputs': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 128), (128, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf7 = 
empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1, primals_2, buf7, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf6) del primals_7 return buf4, reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 128), (128, 1), 0 ), buf4, primals_6, primals_4, buf7 class GAENew(nn.Module): def __init__(self, num_inputs, num_outputs): super(GAENew, self).__init__() self.num_inputs = num_inputs self.num_outputs = num_outputs self.fc = nn.Linear(num_inputs, 128) self.fc_actor = nn.Linear(128, num_outputs) self.fc_critic = nn.Linear(128, 1) for m in self.modules(): if isinstance(m, nn.Linear): nn.init.xavier_uniform(m.weight) @classmethod def get_gae(self, values, rewards, masks): returns = torch.zeros_like(rewards) advantages = torch.zeros_like(rewards) running_return = 0 previous_value = 0 running_advantage = 0 for t in reversed(range(len(rewards))): running_return = rewards[t] + gamma * running_return * masks[t] running_tderror = rewards[t] + gamma * previous_value * masks[t ] - values.data[t] running_advantage = (running_tderror + gamma * lambda_gae * running_advantage * masks[t]) returns[t] = running_return previous_value = values.data[t] advantages[t] = running_advantage return returns, advantages @classmethod def train_model(cls, net, transitions, optimizer): states, actions, rewards, masks = (transitions.state, transitions. 
action, transitions.reward, transitions.mask) states = torch.stack(states) actions = torch.stack(actions) rewards = torch.Tensor(rewards) masks = torch.Tensor(masks) policies, values = net(states) policies = policies.view(-1, net.num_outputs) values = values.view(-1) returns, advantages = net.get_gae(values.view(-1).detach(), rewards, masks) log_policies = (torch.log(policies) * actions.detach()).sum(dim=1) actor_loss = -(log_policies * advantages).sum() critic_loss = (returns.detach() - values).pow(2).sum() entropy = (torch.log(policies) * policies).sum(1).sum() loss = (actor_loss + critic_coefficient * critic_loss - entropy_coefficient * entropy) optimizer.zero_grad() loss.backward() optimizer.step() return loss def get_action(self, input): policy, _ = self.forward(input) policy = policy[0].data.numpy() action = np.random.choice(self.num_outputs, 1, p=policy)[0] return action def forward(self, input_0): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_4 = self.fc_actor.weight primals_5 = self.fc_actor.bias primals_6 = self.fc_critic.weight primals_7 = self.fc_critic.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
g6ling/Pytorch-Cartpole
GAE
false
15,392
[ "MIT" ]
116
ecb7b622cfefe825ac95388cceb6752413d90a2a
https://github.com/g6ling/Pytorch-Cartpole/tree/ecb7b622cfefe825ac95388cceb6752413d90a2a
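The GAE record's get_gae implements the standard generalized-advantage recursion, run backward over the trajectory: returns[t] = r[t] + gamma * mask[t] * returns[t+1], delta[t] = r[t] + gamma * mask[t] * V[t+1] - V[t], and A[t] = delta[t] + gamma * lambda * mask[t] * A[t+1]. Note that the recorded train_model refers to module-level constants (gamma, lambda_gae, critic_coefficient, entropy_coefficient) that the file never defines, and forward calls F.softmax without an explicit dim (the compiled kernels normalize over dim 1), so the record is not runnable as stored. Below is a self-contained CPU sketch of the recursion with the discount factors made explicit arguments; the 0.99 / 0.95 defaults are assumptions, not taken from the record:

import torch

def get_gae(values, rewards, masks, gamma=0.99, lam=0.95):
    # Backward pass over the trajectory (previous_value holds V[t+1]):
    #   returns[t] = r[t] + gamma * m[t] * returns[t+1]
    #   delta[t]   = r[t] + gamma * m[t] * V[t+1] - V[t]
    #   A[t]       = delta[t] + gamma * lam * m[t] * A[t+1]
    returns = torch.zeros_like(rewards)
    advantages = torch.zeros_like(rewards)
    running_return = 0.0
    previous_value = 0.0
    running_advantage = 0.0
    for t in reversed(range(len(rewards))):
        running_return = rewards[t] + gamma * running_return * masks[t]
        td_error = rewards[t] + gamma * previous_value * masks[t] - values[t]
        running_advantage = td_error + gamma * lam * running_advantage * masks[t]
        returns[t] = running_return
        previous_value = values[t]
        advantages[t] = running_advantage
    return returns, advantages

rewards = torch.tensor([1.0, 1.0, 1.0])
masks = torch.tensor([1.0, 1.0, 0.0])   # mask = 0 marks the terminal step
values = torch.tensor([0.5, 0.5, 0.5])
returns, advantages = get_gae(values, rewards, masks)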
TemperatureHolder
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/xp/cxpd2bcgjjguvsbnowhprcscim22tcfdskii4vimpwk2r73oeben.py # Topologically Sorted Source Nodes: [exp], Original ATen: [aten.exp] # Source node to ATen node mapping: # exp => exp # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_1,), kwargs = {}) triton_poi_fused_exp_0 = async_compile.triton('triton_poi_fused_exp_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl_math.exp(tmp1) tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp2, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): 
primals_1, = args args.clear() assert_size_stride(primals_1, (), ()) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [exp], Original ATen: [aten.exp] stream0 = get_raw_stream(0) triton_poi_fused_exp_0.run(primals_1, buf0, 1, grid=grid(1), stream=stream0) del primals_1 return (buf0, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class TemperatureHolder(nn.Module): """Module that holds a temperature as a learnable value. Args: initial_log_temperature (float): Initial value of log(temperature). """ def __init__(self, initial_log_temperature=0): super().__init__() self.log_temperature = nn.Parameter(torch.tensor( initial_log_temperature, dtype=torch.float32)) def forward(self): """Return a temperature as a torch.Tensor.""" return torch.exp(self.log_temperature) def get_inputs(): return [] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl_math.exp(tmp1) tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp2, None) def call(args): primals_1, = args args.clear() assert_size_stride(primals_1, (), ()) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_poi_fused_exp_0[grid(1)](primals_1, buf0, 1, XBLOCK=1, num_warps=1, num_stages=1) del primals_1 return buf0, buf0 class TemperatureHolderNew(nn.Module): """Module that holds a temperature as a learnable value. Args: initial_log_temperature (float): Initial value of log(temperature). """ def __init__(self, initial_log_temperature=0): super().__init__() self.log_temperature = nn.Parameter(torch.tensor( initial_log_temperature, dtype=torch.float32)) def forward(self): primals_1 = self.log_temperature output = call([primals_1]) return output[0]
g-votte/pfrl
TemperatureHolder
false
15,393
[ "MIT" ]
824
4c30c1d73f0941a2b649b62937eec346bb55a95e
https://github.com/g-votte/pfrl/tree/4c30c1d73f0941a2b649b62937eec346bb55a95e
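TemperatureHolder parameterizes a temperature through its logarithm: the optimizer works on an unconstrained log_temperature, exp keeps the temperature itself strictly positive with no clamping, and the chain rule gives d exp(x)/dx = exp(x). A minimal CPU sketch of that behaviour, with illustrative names and an arbitrary target of 0.5:

import torch
from torch import nn

log_temperature = nn.Parameter(torch.tensor(0.0))   # temperature = exp(0) = 1
optimizer = torch.optim.SGD([log_temperature], lr=0.1)

temperature = torch.exp(log_temperature)            # always > 0
loss = (temperature - 0.5) ** 2                     # pull the temperature toward 0.5
optimizer.zero_grad()
loss.backward()
# chain rule: d loss / d log_T = 2 * (T - 0.5) * T
expected = 2 * (temperature.detach() - 0.5) * temperature.detach()
assert torch.isclose(log_temperature.grad, expected)
optimizer.step()                                    # updates log_T; exp stays positive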
ConvCompress
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py # Topologically Sorted Source Nodes: [compressed_mem], Original ATen: [aten.convolution] # Source node to ATen node mapping: # compressed_mem => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [4], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/tc/ctcagp37ljugm52zu6ckorigrppqo67voefe2f2odg5r6hyllhyu.py # Topologically Sorted Source Nodes: [compressed_mem], Original ATen: [aten.convolution] # Source node to ATen node mapping: # compressed_mem => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [4], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [compressed_mem], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0) # Topologically Sorted Source Nodes: [compressed_mem], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(4,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 
1), (4, 1, 1)) del buf0 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [compressed_mem], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 16, grid=grid(16), stream=stream0) del primals_3 return (reinterpret_tensor(buf2, (4, 1, 4), (4, 1, 1), 0), primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class ConvCompress(nn.Module): def __init__(self, dim, ratio=4): super().__init__() self.conv = nn.Conv1d(dim, dim, ratio, stride=ratio) def forward(self, mem): mem = mem.transpose(1, 2) compressed_mem = self.conv(mem) return compressed_mem.transpose(1, 2) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(4,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1), (4, 1, 1)) del buf0 buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(16)](buf2, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 return reinterpret_tensor(buf2, (4, 1, 4), (4, 1, 1), 0 ), primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0) class ConvCompressNew(nn.Module): def __init__(self, dim, ratio=4): super().__init__() self.conv = nn.Conv1d(dim, dim, ratio, stride=ratio) def forward(self, input_0): primals_1 = self.conv.weight primals_3 = self.conv.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
fwka92/compressive-transformer-pytorch
ConvCompress
false
15,394
[ "MIT" ]
108
e51faba52a8c1ec6a8b966e5b912e6ecc3840f57
https://github.com/fwka92/compressive-transformer-pytorch/tree/e51faba52a8c1ec6a8b966e5b912e6ecc3840f57
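ConvCompress shortens a sequence with a single strided Conv1d: with kernel_size == stride == ratio, the output length is floor((L - ratio) / ratio) + 1, i.e. L // ratio whenever L is a multiple of ratio, so each output token summarizes one non-overlapping window of ratio inputs. The two transposes exist only because nn.Conv1d expects channels-first [batch, dim, seq]. A small shape check under assumed sizes (dim=8, ratio=4, seq=16 are illustrative):

import torch
from torch import nn

dim, ratio = 8, 4
conv = nn.Conv1d(dim, dim, kernel_size=ratio, stride=ratio)

mem = torch.randn(2, 16, dim)                     # [batch, seq, dim]
out = conv(mem.transpose(1, 2)).transpose(1, 2)   # Conv1d wants [batch, dim, seq]
assert out.shape == (2, 16 // ratio, dim)         # 16 tokens compressed to 4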
ImageToTensor
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/kq/ckqzjwsdojunslaxexvivgmdizmhm6czh2xpzemjckiggsrasgr3.py # Topologically Sorted Source Nodes: [result], Original ATen: [aten.clone] # Source node to ATen node mapping: # result => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex 
y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask) tl.store(out_ptr0 + (x2 + (16*y3)), tmp0, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [result], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(arg0_1, buf0, 16, 16, grid=grid(16, 16), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.optim import torch.nn as nn import torch.nn.utils import torch.autograd class BaseMetric: """ Base class for all the metrics """ def __init__(self, name): self.name = name def calculate(self, batch_info): """ Calculate value of a metric based on supplied data """ raise NotImplementedError def reset(self): """ Reset value of a metric """ raise NotImplementedError def value(self): """ Return current value for the metric """ raise NotImplementedError def write_state_dict(self, training_info: 'TrainingInfo', hidden_state_dict: 'dict') ->None: """ Potentially store some metric state to the checkpoint """ pass def load_state_dict(self, training_info: 'TrainingInfo', hidden_state_dict: 'dict') ->None: """ Potentially load some metric state from the checkpoint """ pass class AveragingMetric(BaseMetric): """ Base class for metrics that simply calculate the average over the epoch """ def __init__(self, name): super().__init__(name) self.storage = [] def calculate(self, batch_info): """ Calculate value of a metric """ value = self._value_function(batch_info) self.storage.append(value) def _value_function(self, batch_info): raise NotImplementedError def reset(self): """ Reset value of a metric """ self.storage = [] def value(self): """ Return current value for the metric """ return float(np.mean(self.storage)) class Loss(AveragingMetric): """ Just a loss function """ def __init__(self): super().__init__('loss') def _value_function(self, batch_info): """ Just forward a value of the loss""" return batch_info['loss'].item() class Model(nn.Module): """ Class representing full neural network model """ def metrics(self) ->list: """ Set of metrics for this model """ return [Loss()] def train(self, mode=True): """ Sets the module in training mode. This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`, etc. Returns: Module: self """ super().train(mode) if mode: mu.apply_leaf(self, mu.set_train_mode) return self def summary(self, input_size=None, hashsummary=False): """ Print a model summary """ if input_size is None: None None sum(p.numel() for p in self.model.parameters()) None None else: summary(self, input_size) if hashsummary: for idx, hashvalue in enumerate(self.hashsummary()): None def hashsummary(self): """ Print a model summary - checksums of each layer parameters """ children = list(self.children()) result = [] for child in children: result.extend(hashlib.sha256(x.detach().cpu().numpy().tobytes() ).hexdigest() for x in child.parameters()) return result def get_layer_groups(self): """ Return layers grouped """ return [self] def reset_weights(self): """ Call proper initializers for the weights """ pass @property def is_recurrent(self) ->bool: """ If the network is recurrent and needs to be fed state as well as the observations """ return False class BackboneModel(Model): """ Model that serves as a backbone network to connect your heads to """ class ImageToTensor(BackboneModel): """ Convert simple image to tensor. 
Flip channels to a [C, H, W] order and potentially convert 8-bit color values to floats """ def __init__(self): super().__init__() def reset_weights(self): pass def forward(self, image): result = image.permute(0, 3, 1, 2).contiguous() if result.dtype == torch.uint8: result = result.type(torch.float) / 255.0 else: result = result.type(torch.float) return result def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.optim import torch.nn as nn import torch.nn.utils import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tl.store(out_ptr0 + (x2 + 16 * y3), tmp0, xmask & ymask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 16)](arg0_1, buf0, 16, 16, XBLOCK =16, YBLOCK=16, num_warps=4, num_stages=1) del arg0_1 return buf0, class BaseMetric: """ Base class for all the metrics """ def __init__(self, name): self.name = name def calculate(self, batch_info): """ Calculate value of a metric based on supplied data """ raise NotImplementedError def reset(self): """ Reset value of a metric """ raise NotImplementedError def value(self): """ Return current value for the metric """ raise NotImplementedError def write_state_dict(self, training_info: 'TrainingInfo', hidden_state_dict: 'dict') ->None: """ Potentially store some metric state to the checkpoint """ pass def load_state_dict(self, training_info: 'TrainingInfo', hidden_state_dict: 'dict') ->None: """ Potentially load some metric state from the checkpoint """ pass class AveragingMetric(BaseMetric): """ Base class for metrics that simply calculate the average over the epoch """ def __init__(self, name): super().__init__(name) self.storage = [] def calculate(self, batch_info): """ Calculate value of a metric """ value = self._value_function(batch_info) self.storage.append(value) def _value_function(self, batch_info): raise NotImplementedError def reset(self): """ Reset value of a metric """ self.storage = [] def value(self): """ Return current value for the metric """ return float(np.mean(self.storage)) class Loss(AveragingMetric): """ Just a loss function """ def __init__(self): super().__init__('loss') def _value_function(self, batch_info): """ Just forward a value of the loss""" return batch_info['loss'].item() class Model(nn.Module): """ Class representing full neural network model """ def metrics(self) ->list: """ Set of metrics for this model """ return [Loss()] def train(self, mode=True): """ Sets the module in training mode. This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`, etc. 
        Returns:
            Module: self
        """
        super().train(mode)
        if mode:
            # `mu` is assumed to be the source repo's module-utility helper
            # (vel.util.module_util in the vel codebase); imported lazily here
            # so the rest of the class stays usable without it.
            import vel.util.module_util as mu
            mu.apply_leaf(self, mu.set_train_mode)
        return self

    def summary(self, input_size=None, hashsummary=False):
        """ Print a model summary """
        if input_size is None:
            # The print calls of the original source were stripped to bare
            # `None` statements in this dump; they are reconstructed here and
            # the exact wording is assumed. The dump also referenced
            # self.model, which the base Model does not define, so
            # self.parameters() is assumed.
            print(self)
            print('-' * 80)
            number = sum(p.numel() for p in self.parameters())
            print('Number of model parameters: {:,}'.format(number))
            print('-' * 80)
        else:
            # `summary` is assumed to come from the torchsummary package.
            from torchsummary import summary
            summary(self, input_size)
        if hashsummary:
            for idx, hashvalue in enumerate(self.hashsummary()):
                print('{}: {}'.format(idx, hashvalue))

    def hashsummary(self):
        """ Print a model summary - checksums of each layer parameters """
        import hashlib  # not imported at module level in this dump
        children = list(self.children())
        result = []
        for child in children:
            result.extend(hashlib.sha256(x.detach().cpu().numpy().tobytes()
                ).hexdigest() for x in child.parameters())
        return result

    def get_layer_groups(self):
        """ Return layers grouped """
        return [self]

    def reset_weights(self):
        """ Call proper initializers for the weights """
        pass

    @property
    def is_recurrent(self) ->bool:
        """ If the network is recurrent and needs to be fed state as well as
        the observations """
        return False


class BackboneModel(Model):
    """ Model that serves as a backbone network to connect your heads to """


class ImageToTensorNew(BackboneModel):
    """ Convert simple image to tensor.

    Flip channels to a [C, H, W] order and potentially convert 8-bit color
    values to floats """

    def __init__(self):
        super().__init__()

    def reset_weights(self):
        pass

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
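# A hedged equivalence sketch (assumption: a CUDA device is available). The
# compiled graph above is specialized to contiguous float32 inputs of shape
# (4, 4, 4, 4), so the uint8 / 255.0 branch of the eager version is never hit.
if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = x.permute(0, 3, 1, 2).contiguous()   # what ImageToTensor.forward computes
    compiled = ImageToTensorNew()(x)             # the Triton clone kernel above
    torch.testing.assert_close(compiled, eager)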
galatolofederico/vel
ImageToTensor
false
15,395
[ "MIT" ]
273
0473648cffb3f34fb784d12dbb25844ab58ffc3c
https://github.com/galatolofederico/vel/tree/0473648cffb3f34fb784d12dbb25844ab58ffc3c
PreNormTransformerDecoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/6s/c6sstbvcita246hkfqwdeatnmsh3e6vlcncrzcwlsoqg7dmxvabp.py # Topologically Sorted Source Nodes: [tgt2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # tgt2 => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [1]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/zv/czv3tzezwxkylzsgkrivaldxprnr7tvjr5iihe4mbc7bzdev5lsj.py # Topologically Sorted Source Nodes: [tgt2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # tgt2 => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [1]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ah/cahpqo3o7hv3q647n5lretlqvfljlubj4ic7gscxws4yvkm5jzff.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] # Source node to ATen node mapping: # multi_head_attention_forward => mul_2 # Graph fragment: # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7s/c7spagnqvsgjrukyw5jujzjmswxuigeuvpyhxgdob766q2gfvgzr.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] # Source node to ATen node mapping: # multi_head_attention_forward => amax, exp, sub_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, 
[-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/dw/cdwqsjnh2osfmjr2utzzaqdg2vrfivzkuhareq3urgidllj2bsvr.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] # Source node to ATen node mapping: # multi_head_attention_forward => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/y5/cy5gjrtl7netbzcjhig66pdorub2vbq2qvwmv3tamld2ehimmlz7.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] # Source node to ATen node mapping: # multi_head_attention_forward => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask) tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ji/cjikooh3unjvssdwbmc5bbgrf7argvwkpdjikzfpajfrzpotlkhf.py # Topologically Sorted Source Nodes: [tgt, tgt2_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # tgt => add_2 # tgt2_1 => var_mean_1 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %squeeze), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_2, [1]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = 
tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/j4/cj4vucbv6vxdldbfg73k3ixw2brnd6f754oxugjq3s7syrcrb4qe.py # Topologically Sorted Source Nodes: [tgt, tgt2_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # tgt => add_2 # tgt2_1 => add_3, add_4, mul_3, mul_4, rsqrt_1, sub_2 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %squeeze), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_3,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %getitem_9), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_8), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_9), kwargs = {}) triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) 
x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7u/c7up5iv73h2zidaszkmsx5lcplapb2eule6rqadg4siq5zdaoo5m.py # Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add] # Source node to ATen node mapping: # tgt => add_2 # tgt_1 => add_5 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %squeeze), kwargs = {}) # %add_5 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %squeeze_2), kwargs = {}) triton_poi_fused_add_8 = async_compile.triton('triton_poi_fused_add_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_out_ptr0 + (x2), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/72/c72dbqgy5xkloy2rcp3rynir3hrgprnoecgmcynn3qr6522vq72l.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_18), kwargs = {}) # %relu : [num_users=2] 
= call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_9 = async_compile.triton('triton_poi_fused_relu_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/4e/c4escp7v3hikmzkxuyjmzdj3juzy4ty3yheqrkztwu2gvsteirte.py # Topologically Sorted Source Nodes: [tgt_2], Original ATen: [aten.add] # Source node to ATen node mapping: # tgt_2 => add_8 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_20), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %add_tensor), kwargs = {}) triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 
'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (12, 4), (4, 1)) assert_size_stride(primals_12, (12, ), (1, )) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4, ), (1, )) assert_size_stride(primals_15, (4, ), (1, )) assert_size_stride(primals_16, (4, ), (1, )) assert_size_stride(primals_17, (2048, 4), (4, 1)) assert_size_stride(primals_18, (2048, ), (1, )) assert_size_stride(primals_19, (4, 2048), (2048, 1)) assert_size_stride(primals_20, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [tgt2], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 4, grid=grid(4), stream=stream0) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [tgt2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 16, grid=grid(16), stream=stream0) del primals_1 del primals_2 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 4), buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], 
Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 8), buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf5) buf6 = reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(buf6, primals_5, 16, grid=grid(16), stream=stream0) del primals_5 buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1, 4), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf7, buf8, 64, grid=grid(64), stream=stream0) buf9 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf8, buf9, 64, grid=grid(64), stream=stream0) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 1), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] triton_poi_fused_clone_5.run(buf10, buf11, 4, 4, grid=grid(4, 4), stream=stream0) buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf11, (4, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12) del primals_7 buf13 = buf1; del buf1 # reuse buf14 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [tgt, tgt2_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_6.run(primals_3, buf12, buf13, buf14, 4, grid=grid(4), stream=stream0) buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [tgt, tgt2_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_7.run(primals_3, buf12, buf13, buf14, primals_8, primals_9, buf15, 16, grid=grid(16), stream=stream0) del primals_9 buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf15, reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf16) buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_12, (4, ), (1, ), 4), primals_10, reinterpret_tensor(primals_11, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf17) buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_12, (4, ), (1, ), 8), primals_10, reinterpret_tensor(primals_11, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf18) buf19 = reinterpret_tensor(buf16, (4, 4, 1), (1, 4, 16), 0); del buf16 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward_1], 
Original ATen: [aten.mul] triton_poi_fused_mul_2.run(buf19, primals_12, 16, grid=grid(16), stream=stream0) del primals_12 buf20 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.bmm] extern_kernels.bmm(buf19, reinterpret_tensor(buf17, (4, 1, 4), (1, 1, 4), 0), out=buf20) buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf20, buf21, 64, grid=grid(64), stream=stream0) buf22 = buf20; del buf20 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf21, buf22, 64, grid=grid(64), stream=stream0) del buf21 buf23 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.bmm] extern_kernels.bmm(buf22, reinterpret_tensor(buf18, (4, 4, 1), (1, 4, 1), 0), out=buf23) buf24 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.clone] triton_poi_fused_clone_5.run(buf23, buf24, 4, 4, grid=grid(4, 4), stream=stream0) buf25 = reinterpret_tensor(buf23, (4, 4), (4, 1), 0); del buf23 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf24, (4, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf25) buf26 = buf25; del buf25 # reuse # Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add] triton_poi_fused_add_8.run(buf26, primals_3, buf12, primals_14, 16, grid=grid(16), stream=stream0) del primals_14 buf27 = buf14; del buf14 # reuse buf28 = buf13; del buf13 # reuse # Topologically Sorted Source Nodes: [tgt2_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_0.run(buf26, buf27, buf28, 4, grid=grid(4), stream=stream0) buf29 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [tgt2_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(buf26, buf27, buf28, primals_15, primals_16, buf29, 16, grid=grid(16), stream=stream0) del buf27 del buf28 del primals_16 buf30 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf29, reinterpret_tensor(primals_17, (4, 2048), (1, 4), 0), out=buf30) buf31 = buf30; del buf30 # reuse # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] triton_poi_fused_relu_9.run(buf31, primals_18, 8192, grid=grid(8192), stream=stream0) del primals_18 buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf31, reinterpret_tensor(primals_19, (2048, 4), (1, 2048), 0), out=buf32) buf33 = buf32; del buf32 # reuse # Topologically Sorted Source Nodes: [tgt_2], Original ATen: [aten.add] triton_poi_fused_add_10.run(buf33, buf26, primals_20, 16, grid=grid(16), stream=stream0) del primals_20 return (buf33, primals_3, primals_8, primals_15, buf2, buf9, reinterpret_tensor(buf11, (4, 4), (4, 1), 0), buf12, buf15, primals_10, buf22, reinterpret_tensor(buf24, (4, 4), (4, 1), 0), buf26, buf29, buf31, primals_19, primals_17, primals_13, reinterpret_tensor(buf18, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf19, (4, 1, 4), (1, 1, 4), 0), 
reinterpret_tensor(buf17, (4, 4, 1), (1, 4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (4, 1), 0), primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 32), reinterpret_tensor(primals_4, (4, 4), (4, 1), 16), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
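# The pair of softmax kernels above (triton_poi_fused__softmax_3 / _4) implement
# the numerically stable two-pass scheme
#     softmax(x)_i = exp(x_i - max_j x_j) / sum_j exp(x_j - max_j x_j),
# first materializing the shifted exponentials, then normalizing by their row
# sum. A minimal eager sketch of the same decomposition (names illustrative):
import torch


def two_pass_softmax(scores):
    shifted = scores - scores.amax(dim=-1, keepdim=True)  # pass 1: subtract row max
    exps = shifted.exp()
    return exps / exps.sum(dim=-1, keepdim=True)          # pass 2: normalize


s = torch.randn(4, 4, 4)
torch.testing.assert_close(two_pass_softmax(s), torch.softmax(s, dim=-1))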
import torch
import torch.nn as nn


class PreNormTransformerDecoderLayer(nn.TransformerDecoderLayer):
    """
    A variant of :class:`torch.nn.TransformerDecoderLayer` where layer
    normalization is included inside the residual branch, and performed before
    self-attention and feedforward layers.

    Refer to the documentation of :class:`torch.nn.TransformerDecoderLayer`
    for more details on the API.
    """

    def forward(self, tgt, memory, tgt_mask=None, memory_mask=None,
            tgt_key_padding_mask=None, memory_key_padding_mask=None):
        # Pre-norm self-attention block: normalize, attend, then residual-add.
        tgt2 = self.norm1(tgt)
        tgt2, _ = self.self_attn(tgt2, tgt2, tgt2, attn_mask=tgt_mask,
            key_padding_mask=tgt_key_padding_mask)
        tgt = tgt + self.dropout1(tgt2)
        # Pre-norm cross-attention over the encoder memory.
        tgt2 = self.norm2(tgt)
        tgt2, _ = self.multihead_attn(tgt2, memory, memory, attn_mask=
            memory_mask, key_padding_mask=memory_key_padding_mask)
        tgt = tgt + self.dropout2(tgt2)
        # Pre-norm position-wise feedforward block.
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'nhead': 4}]
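# A minimal usage sketch of the eager layer above, matching get_inputs() /
# get_init_inputs(): d_model=4, nhead=4, and unbatched (seq_len, d_model)
# tensors (unbatched input is supported by nn.MultiheadAttention in recent
# PyTorch releases; this sketch is not part of the source).
layer = PreNormTransformerDecoderLayer(d_model=4, nhead=4)
layer.eval()  # disable dropout so repeated calls are deterministic
tgt, memory = torch.rand(4, 4), torch.rand(4, 4)
with torch.no_grad():
    out = layer(tgt, memory)
assert out.shape == tgt.shape  # the residual connections preserve the shape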
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def 
triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset 
+ tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_out_ptr0 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (12, 4), (4, 1)) assert_size_stride(primals_12, (12,), (1,)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (2048, 4), (4, 1)) assert_size_stride(primals_18, (2048,), (1,)) assert_size_stride(primals_19, (4, 2048), (2048, 1)) assert_size_stride(primals_20, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(4)](primals_3, buf0, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(16)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 del primals_2 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4), buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha= 1, beta=1, out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8), buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha= 1, beta=1, out=buf5) buf6 = 
reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0) del buf3 triton_poi_fused_mul_2[grid(16)](buf6, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1, 4), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = buf7 del buf7 triton_poi_fused__softmax_4[grid(64)](buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 1), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) triton_poi_fused_clone_5[grid(4, 4)](buf10, buf11, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0) del buf10 extern_kernels.addmm(primals_7, reinterpret_tensor(buf11, (4, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12) del primals_7 buf13 = buf1 del buf1 buf14 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_6[grid(4)](primals_3, buf12, buf13, buf14, 4, XBLOCK=4, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_3, buf12, buf13, buf14, primals_8, primals_9, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_9 buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf15, reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf16) buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_12, (4,), (1,), 4), primals_10, reinterpret_tensor(primals_11, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf17) buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_12, (4,), (1,), 8), primals_10, reinterpret_tensor(primals_11, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf18) buf19 = reinterpret_tensor(buf16, (4, 4, 1), (1, 4, 16), 0) del buf16 triton_poi_fused_mul_2[grid(16)](buf19, primals_12, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_12 buf20 = buf8 del buf8 extern_kernels.bmm(buf19, reinterpret_tensor(buf17, (4, 1, 4), (1, 1, 4), 0), out=buf20) buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(64)](buf20, buf21, 64, XBLOCK=64, num_warps=1, num_stages=1) buf22 = buf20 del buf20 triton_poi_fused__softmax_4[grid(64)](buf21, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf21 buf23 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf22, reinterpret_tensor(buf18, (4, 4, 1), (1, 4, 1), 0), out=buf23) buf24 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) triton_poi_fused_clone_5[grid(4, 4)](buf23, buf24, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf25 = reinterpret_tensor(buf23, (4, 4), (4, 1), 0) del buf23 extern_kernels.mm(reinterpret_tensor(buf24, (4, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf25) buf26 = buf25 del buf25 triton_poi_fused_add_8[grid(16)](buf26, primals_3, buf12, primals_14, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_14 buf27 = buf14 del buf14 buf28 = buf13 del buf13 triton_poi_fused_native_layer_norm_0[grid(4)](buf26, buf27, buf28, 4, XBLOCK=4, num_warps=1, num_stages=1) buf29 = empty_strided_cuda((4, 4), (4, 1), 
torch.float32) triton_poi_fused_native_layer_norm_1[grid(16)](buf26, buf27, buf28, primals_15, primals_16, buf29, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf27 del buf28 del primals_16 buf30 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) extern_kernels.mm(buf29, reinterpret_tensor(primals_17, (4, 2048), (1, 4), 0), out=buf30) buf31 = buf30 del buf30 triton_poi_fused_relu_9[grid(8192)](buf31, primals_18, 8192, XBLOCK =128, num_warps=4, num_stages=1) del primals_18 buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf31, reinterpret_tensor(primals_19, (2048, 4), (1, 2048), 0), out=buf32) buf33 = buf32 del buf32 triton_poi_fused_add_10[grid(16)](buf33, buf26, primals_20, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_20 return (buf33, primals_3, primals_8, primals_15, buf2, buf9, reinterpret_tensor(buf11, (4, 4), (4, 1), 0), buf12, buf15, primals_10, buf22, reinterpret_tensor(buf24, (4, 4), (4, 1), 0), buf26, buf29, buf31, primals_19, primals_17, primals_13, reinterpret_tensor(buf18, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf19, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf17, (4, 4, 1), (1, 4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (4, 1), 0), primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 32), reinterpret_tensor(primals_4, (4, 4), (4, 1), 16), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0)) class PreNormTransformerDecoderLayerNew(nn.TransformerDecoderLayer): """ A variant of :class:`torch.nn.TransformerDecoderLayer` where layer normalization is included inside the residual branch, and performed before self-attention and feedforward layers. Refer to the documentation of :class:`torch.nn.TransformerDecoderLayer` for more details on the API. """ def forward(self, input_0, input_1): primals_4 = self.self_attn.in_proj_weight primals_5 = self.self_attn.in_proj_bias primals_3 = self.self_attn.out_proj.weight primals_1 = self.self_attn.out_proj.bias primals_11 = self.multihead_attn.in_proj_weight primals_12 = self.multihead_attn.in_proj_bias primals_6 = self.multihead_attn.out_proj.weight primals_2 = self.multihead_attn.out_proj.bias primals_17 = self.linear1.weight primals_18 = self.linear1.bias primals_19 = self.linear2.weight primals_7 = self.linear2.bias primals_8 = self.norm1.weight primals_9 = self.norm1.bias primals_14 = self.norm2.weight primals_15 = self.norm2.bias primals_16 = self.norm3.weight primals_20 = self.norm3.bias primals_10 = input_0 primals_13 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20]) return output[0]
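A minimal sketch of the two-kernel softmax split visible above: triton_poi_fused__softmax_4 (shown in full) divides each element by its row sum, and its companion triton_poi_fused__softmax_3 is assumed here to produce the max-shifted exponentials, which is Inductor's usual decomposition; the (4, 4, 4) shape mirrors the attention buffers buf7/buf8.

import torch

# Assumed decomposition: _softmax_3 -> exp(x - rowmax), _softmax_4 -> x / rowsum.
scores = torch.rand(4, 4, 4)
shifted = torch.exp(scores - scores.max(dim=-1, keepdim=True).values)  # _softmax_3 stage
probs = shifted / shifted.sum(dim=-1, keepdim=True)                    # _softmax_4 stage
print(torch.allclose(probs, torch.softmax(scores, dim=-1)))            # True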
funnyzhou/REFERS
PreNormTransformerDecoderLayer
false
15,396
[ "MIT" ]
46
392eddf13cbf3c3a7dc0bf8bfffd108ca4a65a19
https://github.com/funnyzhou/REFERS/tree/392eddf13cbf3c3a7dc0bf8bfffd108ca4a65a19
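A minimal sketch (not from the repo; the PreNormResidual name and the size 4 are illustrative) of the pre-norm residual ordering that PreNormTransformerDecoderLayer describes: LayerNorm runs inside the residual branch, before the sublayer, instead of after the residual sum as in the post-norm torch.nn.TransformerDecoderLayer.

import torch
import torch.nn as nn

class PreNormResidual(nn.Module):
    # pre-norm: y = x + f(norm(x)); post-norm would be y = norm(x + f(x))
    def __init__(self, d_model, sublayer):
        super().__init__()
        self.norm = nn.LayerNorm(d_model)
        self.sublayer = sublayer

    def forward(self, x):
        return x + self.sublayer(self.norm(x))

block = PreNormResidual(4, nn.Linear(4, 4))
print(block(torch.rand(4, 4)).shape)  # torch.Size([4, 4])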
CausalConv1d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/gk/cgkw7mtnlzc2x4r4dlaxhd27twdsy4zp5nunyiqlzxzxlut26kgd.py # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1], [2], [2], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 6) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 2), (8, 2, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(2,), dilation=(2,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 6), (24, 6, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 96, grid=grid(96), stream=stream0) del primals_2 return (reinterpret_tensor(buf1, (4, 4, 4), (24, 6, 1), 0), primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 2), (8, 2, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class CausalConv1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=2, dilation=2): super(CausalConv1d, self).__init__() self.padding = dilation self.causal_conv = nn.Conv1d(in_channels, out_channels, kernel_size, padding=self.padding, dilation=dilation) def forward(self, minibatch): return self.causal_conv(minibatch)[:, :, :-self.padding] def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 6 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 2), (8, 2, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(2,), dilation=(2,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 6), (24, 6, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(96)](buf1, primals_2, 96, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 4, 4), (24, 6, 1), 0 ), primals_1, primals_3 class CausalConv1dNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=2, dilation=2): super(CausalConv1dNew, self).__init__() self.padding = dilation self.causal_conv = nn.Conv1d(in_channels, out_channels, kernel_size, padding=self.padding, dilation=dilation) def forward(self, input_0): primals_1 = self.causal_conv.weight primals_2 = self.causal_conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
gaotianyu1350/new_fewrel_bertpair
CausalConv1d
false
15,397
[ "MIT" ]
180
27184050d476fc93576948fb26680d508a2824bb
https://github.com/gaotianyu1350/new_fewrel_bertpair/tree/27184050d476fc93576948fb26680d508a2824bb
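A short check (an illustrative sketch, not repo code) of why CausalConv1d's trailing crop makes the convolution causal: symmetric padding of size dilation followed by dropping the last dilation outputs is equivalent to left-padding only, so output step t never depends on inputs after t.

import torch
import torch.nn as nn
import torch.nn.functional as F

torch.manual_seed(0)
conv = nn.Conv1d(1, 1, kernel_size=2, padding=2, dilation=2)
x = torch.rand(1, 1, 8)

causal = conv(x)[:, :, :-2]  # the module's pad-then-crop trick
# Equivalent formulation: pad only on the left, no trailing crop needed.
manual = F.conv1d(F.pad(x, (2, 0)), conv.weight, conv.bias, dilation=2)
print(torch.allclose(causal, manual))  # True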
OneHotEncode
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/nd/cnded5begemoybj4vvfwg6klxnqbgrlwfdrsdau54pqsdnezqdrb.py # Topologically Sorted Source Nodes: [scatter_], Original ATen: [aten.scatter] # Source node to ATen node mapping: # scatter_ => scatter_upon_const_tensor # Graph fragment: # %scatter_upon_const_tensor : [num_users=1] = call_function[target=torch._inductor.fx_passes.post_grad.scatter_upon_const_tensor](args = (), kwargs = {shape: [4, 4], background_val: 0, dtype: torch.float32, dim: 1, selector: %view, val: 1}) # %view_2 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%scatter_upon_const_tensor, [4, -1]), kwargs = {}) triton_poi_fused_scatter_0 = async_compile.triton('triton_poi_fused_scatter_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_scatter_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_scatter_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = x0 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tl.store(out_ptr0 + (x2), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [scatter_], Original ATen: [aten.scatter] stream0 = get_raw_stream(0) triton_poi_fused_scatter_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.optim import torch.nn as nn import torch.nn.utils import torch.autograd def one_hot_encoding(input_tensor, num_labels): """ One-hot encode labels from input """ xview = input_tensor.view(-1, 1) onehot = torch.zeros(xview.size(0), num_labels, device=input_tensor. device, dtype=torch.float) onehot.scatter_(1, xview, 1) return onehot.view(list(input_tensor.shape) + [-1]) class OneHotEncode(nn.Module): """ One-hot encoding layer """ def __init__(self, num_classes): super().__init__() self.num_classes = num_classes def forward(self, x): return one_hot_encoding(x, self.num_classes) def get_inputs(): return [torch.ones([4], dtype=torch.int64)] def get_init_inputs(): return [[], {'num_classes': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.optim import torch.nn as nn import torch.nn.utils import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_scatter_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = x0 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tl.store(out_ptr0 + x2, tmp5, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_scatter_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf0, def one_hot_encoding(input_tensor, num_labels): """ One-hot encode labels from input """ xview = input_tensor.view(-1, 1) onehot = torch.zeros(xview.size(0), num_labels, device=input_tensor. device, dtype=torch.float) onehot.scatter_(1, xview, 1) return onehot.view(list(input_tensor.shape) + [-1]) class OneHotEncodeNew(nn.Module): """ One-hot encoding layer """ def __init__(self, num_classes): super().__init__() self.num_classes = num_classes def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
galatolofederico/vel
OneHotEncode
false
15,399
[ "MIT" ]
273
0473648cffb3f34fb784d12dbb25844ab58ffc3c
https://github.com/galatolofederico/vel/tree/0473648cffb3f34fb784d12dbb25844ab58ffc3c
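A small sketch of the rewrite the fused kernel performs for OneHotEncode: instead of materializing zeros and calling scatter_, triton_poi_fused_scatter_0 compares each column index against the label (the tmp0 == tmp1 / tl.where pattern), which yields the same one-hot matrix.

import torch

labels = torch.tensor([0, 2, 1, 3])
num_classes = 4

via_scatter = torch.zeros(4, num_classes).scatter_(1, labels.view(-1, 1), 1)
via_compare = (labels.view(-1, 1) == torch.arange(num_classes)).float()  # kernel's trick
print(torch.equal(via_scatter, via_compare))  # True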
TimeBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/67/c67xog3fqmh7up3g7xylki5c4xaiura6ex46fohdxb55xmbektbp.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 3 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset 
+ tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (3*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (4*x2) + (12*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/zj/czjc75tzlkflgunoon67ylb7s7yegsgnnexz357nyxlusdtqa43q.py # Topologically Sorted Source Nodes: [conv2d, conv2d_1, sigmoid, temp, conv2d_2, add_1, out], Original ATen: [aten.convolution, aten.sigmoid, aten.add, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # add_1 => add_1 # conv2d => convolution # conv2d_1 => convolution_1 # conv2d_2 => convolution_2 # out => relu # sigmoid => sigmoid # temp => add # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %sigmoid), kwargs = {}) # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %convolution_2), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_convolution_relu_sigmoid_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_convolution_relu_sigmoid_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_sigmoid_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 
16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_relu_sigmoid_threshold_backward_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + (x2), xmask) tmp4 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (x2), xmask) tmp9 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tl.sigmoid(tmp2) tmp7 = tmp5 + tmp6 tmp10 = tmp8 + tmp9 tmp11 = tmp7 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = 0.0 tmp15 = tmp13 <= tmp14 tl.store(in_out_ptr0 + (x2), tmp2, xmask) tl.store(in_out_ptr1 + (x2), tmp13, xmask) tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 3), (12, 1, 12, 4), torch.float32) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(primals_2, buf0, 16, 3, grid=grid(16, 3), stream=stream0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 4, 4), (64, 1, 16, 4), 0), buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 2), (32, 1, 8, 4)) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(primals_4, buf2, 16, 3, grid=grid(16, 3), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 4, 4), (64, 1, 16, 4), 0), buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 2), (32, 1, 8, 4)) buf5 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(primals_6, buf5, 16, 3, grid=grid(16, 3), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 4, 4), (64, 1, 16, 4), 0), buf5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 2), (32, 1, 8, 4)) del buf5 buf4 = buf3; del buf3 # reuse buf7 = buf1; del buf1 # reuse buf8 = empty_strided_cuda((4, 4, 4, 2), (32, 1, 8, 4), torch.bool) # 
Topologically Sorted Source Nodes: [conv2d, conv2d_1, sigmoid, temp, conv2d_2, add_1, out], Original ATen: [aten.convolution, aten.sigmoid, aten.add, aten.relu, aten.threshold_backward] triton_poi_fused_add_convolution_relu_sigmoid_threshold_backward_1.run(buf4, buf7, primals_5, primals_3, buf6, primals_7, buf8, 128, grid=grid(128), stream=stream0) del buf6 del primals_3 del primals_5 del primals_7 return (reinterpret_tensor(buf7, (4, 4, 2, 4), (32, 8, 4, 1), 0), primals_2, primals_4, primals_6, reinterpret_tensor(primals_1, (4, 4, 4, 4), (64, 1, 16, 4), 0), buf4, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1, 3), (12, 3, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1, 3), (12, 3, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 1, 3), (12, 3, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class TimeBlock(nn.Module): """ Neural network block that applies a temporal convolution to each node of a graph in isolation. """ def __init__(self, in_channels, out_channels, kernel_size=3): """ :param in_channels: Number of input features at each node in each time step. :param out_channels: Desired number of output channels at each node in each time step. :param kernel_size: Size of the 1D temporal kernel. """ super(TimeBlock, self).__init__() self.conv1 = nn.Conv2d(in_channels, out_channels, (1, kernel_size)) self.conv2 = nn.Conv2d(in_channels, out_channels, (1, kernel_size)) self.conv3 = nn.Conv2d(in_channels, out_channels, (1, kernel_size)) def forward(self, X): """ :param X: Input data of shape (batch_size, num_nodes, num_timesteps, num_features=in_channels) :return: Output data of shape (batch_size, num_nodes, num_timesteps_out, num_features_out=out_channels) """ X = X.permute(0, 3, 1, 2) temp = self.conv1(X) + torch.sigmoid(self.conv2(X)) out = F.relu(temp + self.conv3(X)) out = out.permute(0, 2, 3, 1) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 3 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 3 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 12 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_convolution_relu_sigmoid_threshold_backward_1( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + x2, xmask) tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tl.sigmoid(tmp2) tmp7 = tmp5 + tmp6 tmp10 = tmp8 + tmp9 tmp11 = tmp7 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = 0.0 tmp15 = tmp13 <= tmp14 tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(in_out_ptr1 + x2, tmp13, xmask) tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 3), (12, 1, 12, 4), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 3)](primals_2, buf0, 16, 3, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 4, 4), (64, 1, 16, 4), 0), buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 2), (32, 1, 8, 4)) buf2 = buf0 del buf0 triton_poi_fused_convolution_0[grid(16, 3)](primals_4, buf2, 16, 3, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 4, 4), (64, 1, 16, 4), 0), buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 2), (32, 1, 8, 4)) buf5 = buf2 del buf2 
triton_poi_fused_convolution_0[grid(16, 3)](primals_6, buf5, 16, 3, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 4, 4), (64, 1, 16, 4), 0), buf5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 2), (32, 1, 8, 4)) del buf5 buf4 = buf3 del buf3 buf7 = buf1 del buf1 buf8 = empty_strided_cuda((4, 4, 4, 2), (32, 1, 8, 4), torch.bool) triton_poi_fused_add_convolution_relu_sigmoid_threshold_backward_1[grid (128)](buf4, buf7, primals_5, primals_3, buf6, primals_7, buf8, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf6 del primals_3 del primals_5 del primals_7 return reinterpret_tensor(buf7, (4, 4, 2, 4), (32, 8, 4, 1), 0 ), primals_2, primals_4, primals_6, reinterpret_tensor(primals_1, ( 4, 4, 4, 4), (64, 1, 16, 4), 0), buf4, buf8 class TimeBlockNew(nn.Module): """ Neural network block that applies a temporal convolution to each node of a graph in isolation. """ def __init__(self, in_channels, out_channels, kernel_size=3): """ :param in_channels: Number of input features at each node in each time step. :param out_channels: Desired number of output channels at each node in each time step. :param kernel_size: Size of the 1D temporal kernel. """ super(TimeBlockNew, self).__init__() self.conv1 = nn.Conv2d(in_channels, out_channels, (1, kernel_size)) self.conv2 = nn.Conv2d(in_channels, out_channels, (1, kernel_size)) self.conv3 = nn.Conv2d(in_channels, out_channels, (1, kernel_size)) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
garygsw/STGCN-PyTorch
TimeBlock
false
15,400
[ "MIT" ]
220
83ae49e566c779444efd21fc03cce54a765ee9f7
https://github.com/garygsw/STGCN-PyTorch/tree/83ae49e566c779444efd21fc03cce54a765ee9f7
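A quick shape check (illustrative only; the real TimeBlock uses three separate convolutions, and a single conv stands in for all of them here) of why the compiled buffers above are (4, 4, 4, 2): a (1, k) kernel with no padding leaves the node axis untouched and shrinks the time axis by k - 1.

import torch
import torch.nn as nn
import torch.nn.functional as F

X = torch.rand(4, 4, 4, 4)            # (batch, nodes, time, features)
conv = nn.Conv2d(4, 4, (1, 3))        # stand-in for conv1/conv2/conv3
Xp = X.permute(0, 3, 1, 2)            # (batch, features, nodes, time)
out = F.relu(conv(Xp) + torch.sigmoid(conv(Xp)) + conv(Xp))
print(out.permute(0, 2, 3, 1).shape)  # torch.Size([4, 4, 2, 4]): time 4 -> 2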
DenseBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/gk/cgkw7mtnlzc2x4r4dlaxhd27twdsy4zp5nunyiqlzxzxlut26kgd.py # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1], [2], [2], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 6) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/n4/cn4nrao6ng2dt25fyig6ikjf3xz5iqxb22ofakq7pljb2sg3u6rd.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat] # Source node to ATen node mapping: # out => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %mul], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 8 x0 = xindex % 4 x2 = (xindex // 32) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (6*((-4) + x1)) + (24*x2)), tmp6 & xmask, other=0.0) tmp10 = libdevice.tanh(tmp9) tmp11 = tl.load(in_ptr2 + (x0 + (6*((-4) + x1)) + (24*x2)), tmp6 & xmask, other=0.0) tmp12 = tl.sigmoid(tmp11) tmp13 = tmp10 * tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp6, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + (x3), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 2), (8, 2, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 2), (8, 2, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(2,), 
dilation=(2,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 6), (24, 6, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 96, grid=grid(96), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,), padding=(2,), dilation=(2,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 6), (24, 6, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(buf3, primals_5, 96, grid=grid(96), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(primals_3, buf1, buf3, buf4, 128, grid=grid(128), stream=stream0) return (buf4, primals_1, primals_3, primals_4, buf1, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 2), (8, 2, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 2), (8, 2, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn from torch.nn import functional as F class CausalConv1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=2, dilation=2): super(CausalConv1d, self).__init__() self.padding = dilation self.causal_conv = nn.Conv1d(in_channels, out_channels, kernel_size, padding=self.padding, dilation=dilation) def forward(self, minibatch): return self.causal_conv(minibatch)[:, :, :-self.padding] class DenseBlock(nn.Module): def __init__(self, in_channels, filters, dilation=2): super(DenseBlock, self).__init__() self.causal_conv1 = CausalConv1d(in_channels, filters, dilation= dilation) self.causal_conv2 = CausalConv1d(in_channels, filters, dilation= dilation) def forward(self, minibatch): tanh = F.tanh(self.causal_conv1(minibatch)) sig = F.sigmoid(self.causal_conv2(minibatch)) out = torch.cat([minibatch, tanh * sig], dim=1) return out def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'filters': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 6 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 8 x0 = xindex % 4 x2 = xindex // 32 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 6 * (-4 + x1) + 24 * x2), tmp6 & xmask, other=0.0) tmp10 = libdevice.tanh(tmp9) tmp11 = tl.load(in_ptr2 + (x0 + 6 * (-4 + x1) + 24 * x2), tmp6 & xmask, other=0.0) tmp12 = tl.sigmoid(tmp11) tmp13 = tmp10 * tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp6, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 2), (8, 2, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 2), (8, 2, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(2,), dilation=(2,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 6), (24, 6, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(96)](buf1, primals_2, 96, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,), padding=(2,), dilation=(2,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 6), (24, 6, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_0[grid(96)](buf3, primals_5, 96, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32) triton_poi_fused_cat_1[grid(128)](primals_3, buf1, buf3, buf4, 128, XBLOCK=128, num_warps=4, num_stages=1) return buf4, primals_1, primals_3, primals_4, buf1, buf3 class CausalConv1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=2, dilation=2): super(CausalConv1d, self).__init__() self.padding = dilation self.causal_conv = nn.Conv1d(in_channels, out_channels, kernel_size, padding=self.padding, dilation=dilation) def forward(self, minibatch): return self.causal_conv(minibatch)[:, :, :-self.padding] class DenseBlockNew(nn.Module): def __init__(self, in_channels, filters, dilation=2): super(DenseBlockNew, 
self).__init__() self.causal_conv1 = CausalConv1d(in_channels, filters, dilation= dilation) self.causal_conv2 = CausalConv1d(in_channels, filters, dilation= dilation) def forward(self, input_0): primals_1 = self.causal_conv1.causal_conv.weight primals_2 = self.causal_conv1.causal_conv.bias primals_4 = self.causal_conv2.causal_conv.weight primals_5 = self.causal_conv2.causal_conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
gaotianyu1350/new_fewrel_bertpair
DenseBlock
false
15,401
[ "MIT" ]
180
27184050d476fc93576948fb26680d508a2824bb
https://github.com/gaotianyu1350/new_fewrel_bertpair/tree/27184050d476fc93576948fb26680d508a2824bb
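A sketch of the DenseBlock channel arithmetic (again with one convolution standing in for the two branches the real module keeps separate): the tanh/sigmoid gate produces `filters` new channels that are concatenated onto the input, growing channels from in_channels to in_channels + filters, the 4 -> 8 seen in triton_poi_fused_cat_1.

import torch
from torch import nn

x = torch.rand(4, 4, 4)
conv = nn.Conv1d(4, 4, 2, padding=2, dilation=2)
y = conv(x)[:, :, :-2]                     # causal conv, sequence length preserved
gated = torch.tanh(y) * torch.sigmoid(y)   # gating nonlinearity
print(torch.cat([x, gated], dim=1).shape)  # torch.Size([4, 8, 4])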
DiagGaussianActionHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/bu/cbuo5xz3d225t2g3fpq2hzoqbtw52eow4m3puopndd4fjlydrcoj.py # Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack] # Source node to ATen node mapping: # stack => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1], -1), kwargs = {}) triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x3 = (xindex // 2) x1 = (xindex // 2) % 4 x4 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x3), 
tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x1), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x4), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [means], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack] stream0 = get_raw_stream(0) triton_poi_fused_stack_0.run(buf0, primals_4, buf1, 32, grid=grid(32), stream=stream0) del buf0 del primals_4 return (buf1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
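# A minimal plain-PyTorch reading of triton_poi_fused_stack_0 above (my
# interpretation of the index math, not generated output): x0 selects between
# the means buffer and the tiled log_std row along a trailing dim of size 2.
import torch

means = torch.rand(4, 4)                                  # buf0 in call()
log_std = torch.rand(1, 4)                                # primals_4
ref = torch.stack([means, log_std.expand(4, 4)], dim=-1)  # (4, 4, 2), strides (8, 2, 1)
flat = ref.reshape(-1)
for i in range(32):                                       # xnumel = 32
    x0, x3, x1 = i % 2, i // 2, i // 2 % 4                # index split from the kernel
    src = means.reshape(-1)[x3] if x0 == 0 else log_std.reshape(-1)[x1]
    assert flat[i] == src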
import torch
import numpy as np
import torch.optim
import torch.nn as nn
import torch.nn.init as init
import torch.nn.utils
import torch.autograd


class DiagGaussianActionHead(nn.Module):
    """
    Action head where actions are normally distributed uncorrelated variables
    with specific means and variances. Means are calculated directly from the
    network while standard deviations are a parameter of this module
    """
    LOG2PI = np.log(2.0 * np.pi)

    def __init__(self, input_dim, num_dimensions):
        super().__init__()
        self.input_dim = input_dim
        self.num_dimensions = num_dimensions
        self.linear_layer = nn.Linear(input_dim, num_dimensions)
        self.log_std = nn.Parameter(torch.zeros(1, num_dimensions))

    def forward(self, input_data):
        means = self.linear_layer(input_data)
        log_std_tile = self.log_std.repeat(means.size(0), 1)
        return torch.stack([means, log_std_tile], dim=-1)

    def sample(self, params, argmax_sampling=False):
        """ Sample from a probability space of all actions """
        means = params[:, :, 0]
        log_std = params[:, :, 1]
        if argmax_sampling:
            return means
        else:
            return torch.randn_like(means) * torch.exp(log_std) + means

    def logprob(self, action_sample, pd_params):
        """ Log-likelihood """
        means = pd_params[:, :, 0]
        log_std = pd_params[:, :, 1]
        std = torch.exp(log_std)
        z_score = (action_sample - means) / std
        return -(0.5 * (z_score ** 2 + self.LOG2PI).sum(dim=-1) +
                 log_std.sum(dim=-1))

    def reset_weights(self):
        init.orthogonal_(self.linear_layer.weight, gain=0.01)
        init.constant_(self.linear_layer.bias, 0.0)

    def entropy(self, params):
        """
        Entropy of a diagonal gaussian distribution:
        sum over dimensions of 1/2 log(2 pi e sigma^2)
        (the categorical analogue would be - sum probs * log(probs))
        """
        log_std = params[:, :, 1]
        return (log_std + 0.5 * (self.LOG2PI + 1)).sum(dim=-1)

    def kl_divergence(self, params_q, params_p):
        """
        KL divergence between two diagonal gaussians, KL(Q || P).

        Per dimension:
        log(sigma_p) - log(sigma_q) + (sigma_q^2 + (mu_q - mu_p)^2) / (2 * sigma_p^2) - 1/2
        """
        means_q = params_q[:, :, 0]
        log_std_q = params_q[:, :, 1]
        means_p = params_p[:, :, 0]
        log_std_p = params_p[:, :, 1]
        std_q = torch.exp(log_std_q)
        std_p = torch.exp(log_std_p)
        kl_div = log_std_p - log_std_q + (std_q ** 2 + (means_q - means_p)
            ** 2) / (2.0 * std_p ** 2) - 0.5
        return kl_div.sum(dim=-1)


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4, 'num_dimensions': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.optim
import torch.nn as nn
import torch.nn.init as init
import torch.nn.utils
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_stack_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2
    x3 = xindex // 2
    x1 = xindex // 2 % 4
    x4 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + x3, tmp4 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 2, tl.int64)
    tmp9 = tl.load(in_ptr1 + x1, tmp6 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x4, tmp10, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (1, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(
            primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_stack_0[grid(32)](buf0, primals_4, buf1, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        del buf0
        del primals_4
    return buf1, primals_3


class DiagGaussianActionHeadNew(nn.Module):
    """
    Action head where actions are normally distributed uncorrelated variables
    with specific means and variances. Means are calculated directly from the
    network while standard deviations are a parameter of this module
    """
    LOG2PI = np.log(2.0 * np.pi)

    def __init__(self, input_dim, num_dimensions):
        super().__init__()
        self.input_dim = input_dim
        self.num_dimensions = num_dimensions
        self.linear_layer = nn.Linear(input_dim, num_dimensions)
        self.log_std = nn.Parameter(torch.zeros(1, num_dimensions))

    def sample(self, params, argmax_sampling=False):
        """ Sample from a probability space of all actions """
        means = params[:, :, 0]
        log_std = params[:, :, 1]
        if argmax_sampling:
            return means
        else:
            return torch.randn_like(means) * torch.exp(log_std) + means

    def logprob(self, action_sample, pd_params):
        """ Log-likelihood """
        means = pd_params[:, :, 0]
        log_std = pd_params[:, :, 1]
        std = torch.exp(log_std)
        z_score = (action_sample - means) / std
        return -(0.5 * (z_score ** 2 + self.LOG2PI).sum(dim=-1) +
                 log_std.sum(dim=-1))

    def reset_weights(self):
        init.orthogonal_(self.linear_layer.weight, gain=0.01)
        init.constant_(self.linear_layer.bias, 0.0)

    def entropy(self, params):
        """
        Entropy of a diagonal gaussian distribution:
        sum over dimensions of 1/2 log(2 pi e sigma^2)
        (the categorical analogue would be - sum probs * log(probs))
        """
        log_std = params[:, :, 1]
        return (log_std + 0.5 * (self.LOG2PI + 1)).sum(dim=-1)

    def kl_divergence(self, params_q, params_p):
        """
        KL divergence between two diagonal gaussians, KL(Q || P).

        Per dimension:
        log(sigma_p) - log(sigma_q) + (sigma_q^2 + (mu_q - mu_p)^2) / (2 * sigma_p^2) - 1/2
        """
        means_q = params_q[:, :, 0]
        log_std_q = params_q[:, :, 1]
        means_p = params_p[:, :, 0]
        log_std_p = params_p[:, :, 1]
        std_q = torch.exp(log_std_q)
        std_p = torch.exp(log_std_p)
        kl_div = log_std_p - log_std_q + (std_q ** 2 + (means_q - means_p)
            ** 2) / (2.0 * std_p ** 2) - 0.5
        return kl_div.sum(dim=-1)

    def forward(self, input_0):
        primals_4 = self.log_std
        primals_1 = self.linear_layer.weight
        primals_2 = self.linear_layer.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
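# A minimal usage sketch, assuming the eager DiagGaussianActionHead defined
# above is in scope; with freshly initialized log_std == 0, the entropy reduces
# to num_dimensions * 0.5 * (log(2 * pi) + 1).
import numpy as np
import torch

head = DiagGaussianActionHead(input_dim=4, num_dimensions=4)
params = head(torch.rand(4, 4))       # (4, 4, 2): [..., 0] = means, [..., 1] = log_std
actions = head.sample(params)         # one Gaussian draw per batch row
logp = head.logprob(actions, params)
assert logp.shape == (4,)
expected = float(4 * 0.5 * (np.log(2.0 * np.pi) + 1.0))
assert torch.allclose(head.entropy(params), torch.full((4,), expected))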
galatolofederico/vel
DiagGaussianActionHead
false
15,402
[ "MIT" ]
273
0473648cffb3f34fb784d12dbb25844ab58ffc3c
https://github.com/galatolofederico/vel/tree/0473648cffb3f34fb784d12dbb25844ab58ffc3c
DotAttention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/nu/cnuc7ivckuuly7yn2763pwt3sw72jd6vuwpeeu4sfespm5iz7fq4.py # Topologically Sorted Source Nodes: [attention_weight_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_weight_1 => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/fj/cfjl47pvhwbpfbvh6rfehwy5ijxc5p3zgkld2lwf3mw5bl6pbkak.py # Topologically Sorted Source Nodes: [attention_weight_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_weight_1 => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 
(4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_weight_1], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [attention_weight_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0) buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [attention_out], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3) del arg2_1 return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
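# A plain-PyTorch reading of the two generated kernels above (my
# interpretation): the first emits exp((x - rowmax) / 2.0), the numerically
# stable form of softmax(x / sqrt(H)) with H = 4, and the second normalizes
# each row by its sum.
import torch

scores = torch.rand(4, 4, 4, 4)
stable = torch.exp((scores - scores.amax(dim=-1, keepdim=True)) / 2.0)  # triton_poi_fused__softmax_0
weights = stable / stable.sum(dim=-1, keepdim=True)                     # triton_poi_fused__softmax_1
assert torch.allclose(weights, torch.softmax(scores / 2.0, dim=-1))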
import torch
import torch.nn as nn
import torch.nn.functional as F


class DotAttention(nn.Module):

    def __init__(self, dropout=0.0):
        super(DotAttention, self).__init__()
        self.dropout = dropout

    def forward(self, Q, K, V, mask_out=None, head_mask=None):
        """
        In the common case where the input is a single sequence X, assume K = V = X.
        att_weight = softmax( score_func(q, k) )
        att = sum( att_weight * v )
        :param Q: [..., L, H]
        :param K: [..., S, H]
        :param V: [..., S, H]
        :param mask_out: [..., 1, S]
        :return:
        """
        H = Q.size(-1)
        scale = float(H) ** 0.5
        attention_weight = torch.matmul(Q, K.transpose(-1, -2)) / scale
        if mask_out is not None:
            while mask_out.dim() != Q.dim():
                mask_out = mask_out.unsqueeze(1)
            attention_weight.masked_fill_(mask_out, -100000000.0)
        attention_weight = F.softmax(attention_weight, dim=-1)
        attention_weight = F.dropout(attention_weight, self.dropout)
        if head_mask is not None:
            attention_weight = attention_weight * head_mask
        attention_out = torch.matmul(attention_weight, V)
        return attention_out, attention_weight


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3 ) del arg2_1 return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2 class DotAttentionNew(nn.Module): def __init__(self, dropout=0.0): super(DotAttentionNew, self).__init__() self.dropout = dropout def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = 
input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
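# A minimal usage sketch, assuming the eager DotAttention above is in scope and
# dropout stays at 0.0: with no mask, the attention weights along the last dim
# must sum to one.
import torch

attn = DotAttention(dropout=0.0)
Q = K = V = torch.rand(4, 4, 4, 4)
out, weight = attn(Q, K, V)
assert out.shape == (4, 4, 4, 4)
assert torch.allclose(weight.sum(dim=-1), torch.ones(4, 4, 4, 4))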
fmc123653/DeepKE
DotAttention
false
15,403
[ "MIT" ]
676
4d30e51368681c7cb73e2ecacf9b922b441cbe99
https://github.com/fmc123653/DeepKE/tree/4d30e51368681c7cb73e2ecacf9b922b441cbe99
CosSim
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/pd/cpdi7xzvi46tuzqsv24tk5nffrrhhudrzinef6vmc6ndavfymp7f.py # Topologically Sorted Source Nodes: [norms, nfeat], Original ATen: [aten.linalg_vector_norm, aten.div] # Source node to ATen node mapping: # nfeat => div # norms => pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %pow_2), kwargs = {}) triton_poi_fused_div_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_div_linalg_vector_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6m/c6metdnckygxysmgr4vz3hy6kzlisxiamgwv4z5i6krg4kdyzyde.py # Topologically Sorted Source Nodes: [norms_c, ncenters], Original ATen: [aten.linalg_vector_norm, aten.div] # Source node to ATen node mapping: # ncenters => div_1 # norms_c => pow_3, pow_4, sum_2 # Graph fragment: # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [-1], True), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %pow_4), kwargs = {}) triton_poi_fused_div_linalg_vector_norm_1 = async_compile.triton('triton_poi_fused_div_linalg_vector_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_linalg_vector_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_linalg_vector_norm_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = 
tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [norms, nfeat], Original ATen: [aten.linalg_vector_norm, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_linalg_vector_norm_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [norms_c, ncenters], Original ATen: [aten.linalg_vector_norm, aten.div] triton_poi_fused_div_linalg_vector_norm_1.run(primals_2, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) del buf1 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
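# A plain-PyTorch reading of the two generated kernels above (my
# interpretation): both divide each row of 4 elements by its Euclidean norm,
# one pass for the 256-element feature tensor and one for the 16-element
# centroid matrix.
import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
normed = x / x.pow(2).sum(dim=-1, keepdim=True).sqrt()
assert torch.allclose(normed, F.normalize(x, p=2, dim=-1), atol=1e-6)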
import torch import torch.nn as nn class CosSim(nn.Module): def __init__(self, nfeat, nclass, codebook=None, learn_cent=True): super(CosSim, self).__init__() self.nfeat = nfeat self.nclass = nclass self.learn_cent = learn_cent if codebook is None: codebook = torch.randn(nclass, nfeat) self.centroids = nn.Parameter(codebook.clone()) if not learn_cent: self.centroids.requires_grad_(False) def forward(self, x): norms = torch.norm(x, p=2, dim=-1, keepdim=True) nfeat = torch.div(x, norms) norms_c = torch.norm(self.centroids, p=2, dim=-1, keepdim=True) ncenters = torch.div(self.centroids, norms_c) logits = torch.matmul(nfeat, torch.transpose(ncenters, 0, 1)) return logits def extra_repr(self) ->str: return 'in_features={}, n_class={}, learn_centroid={}'.format(self. nfeat, self.nclass, self.learn_cent) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nclass': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_div_linalg_vector_norm_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_linalg_vector_norm_0[grid(256)](primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_linalg_vector_norm_1[grid(16)](primals_2, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) del buf1 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class CosSimNew(nn.Module): def __init__(self, nfeat, nclass, codebook=None, learn_cent=True): super(CosSimNew, self).__init__() self.nfeat = nfeat self.nclass = nclass self.learn_cent = learn_cent if codebook is None: codebook = torch.randn(nclass, nfeat) self.centroids = nn.Parameter(codebook.clone()) if not learn_cent: self.centroids.requires_grad_(False) def extra_repr(self) ->str: return 'in_features={}, n_class={}, learn_centroid={}'.format(self. 
nfeat, self.nclass, self.learn_cent) def forward(self, input_0): primals_2 = self.centroids primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
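# A minimal usage sketch, assuming the eager CosSim above is in scope: because
# both the features and the centroids are L2-normalized, every logit is a
# cosine similarity and lands in [-1, 1].
import torch

layer = CosSim(nfeat=4, nclass=4)
logits = layer(torch.rand(4, 4, 4, 4))
assert logits.shape == (4, 4, 4, 4)
assert float(logits.abs().max()) <= 1.0 + 1e-5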
gajrajgchouhan/orthohash
CosSim
false
15,404
[ "BSD-3-Clause" ]
51
4e04cfe1dd32e21ba004e308d5a1ce9c8578ea2b
https://github.com/gajrajgchouhan/orthohash/tree/4e04cfe1dd32e21ba004e308d5a1ce9c8578ea2b
PrecomputedNorm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/yi/cyiakmznzllqklcnfmh5hu3smacswbrh4drlxafkndcx6god674a.py # Topologically Sorted Source Nodes: [sub, truediv], Original ATen: [aten.sub, aten.div] # Source node to ATen node mapping: # sub => sub # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 4), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, 4), kwargs = {}) triton_poi_fused_div_sub_0 = async_compile.triton('triton_poi_fused_div_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 4.0 tmp2 = tmp0 - tmp1 tmp3 = 0.25 tmp4 = tmp2 * tmp3 
tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, truediv], Original ATen: [aten.sub, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
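# A plain reading of triton_poi_fused_div_sub_0 above (my interpretation): the
# stats (mean=4, std=4) are baked into the kernel as constants, with the
# division by std folded into a multiply by 0.25.
import torch

x = torch.rand(4, 4, 4, 4)
assert torch.allclose((x - 4.0) * 0.25, (x - 4.0) / 4.0)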
import torch import torch.nn as nn class PrecomputedNorm(nn.Module): """Normalization using Pre-computed Mean/Std. Args: stats: Precomputed (mean, std). axis: Axis setting used to calculate mean/variance. """ def __init__(self, stats, axis=[1, 2]): super().__init__() self.axis = axis self.mean, self.std = stats def forward(self, X: 'torch.Tensor') ->torch.Tensor: return (X - self.mean) / self.std def __repr__(self): format_string = (self.__class__.__name__ + f'(mean={self.mean}, std={self.std}, axis={self.axis})') return format_string def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'stats': [4, 4]}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 4.0 tmp2 = tmp0 - tmp1 tmp3 = 0.25 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class PrecomputedNormNew(nn.Module): """Normalization using Pre-computed Mean/Std. Args: stats: Precomputed (mean, std). axis: Axis setting used to calculate mean/variance. """ def __init__(self, stats, axis=[1, 2]): super().__init__() self.axis = axis self.mean, self.std = stats def __repr__(self): format_string = (self.__class__.__name__ + f'(mean={self.mean}, std={self.std}, axis={self.axis})') return format_string def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
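# A minimal usage sketch, assuming the eager PrecomputedNorm above is in scope:
# with stats (mean=4.0, std=4.0), an all-fours input normalizes to exactly zero.
import torch

norm = PrecomputedNorm(stats=(4.0, 4.0))
x = torch.full((4, 4, 4, 4), 4.0)
assert torch.equal(norm(x), torch.zeros_like(x))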
gcambara/s3prl
PrecomputedNorm
false
15,405
[ "MIT" ]
856
33284ebde3a903ed8604d6dae85669d0174ae1d3
https://github.com/gcambara/s3prl/tree/33284ebde3a903ed8604d6dae85669d0174ae1d3
PGenLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/nc/cnc6a3vkphurm472zdavmn3qnff4lmaezxs63jlllw2kks2e62a4.py # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # input_1 => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2, %primals_3], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = (xindex // 12) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + 
((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6q/c6q567bxmyjlgebbvymd32lebt3oyhtpbyxv4hl6hj7rwb7ah4ft.py # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (1, 12), (12, 1)) assert_size_stride(primals_5, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [input_1], Original ATen: 
[aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, primals_3, buf0, 48, grid=grid(48), stream=stream0) del primals_1 del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (12, 1), (1, 12), 0), out=buf1) del primals_4 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_1.run(buf2, primals_5, 4, grid=grid(4), stream=stream0) del primals_5 return (buf2, buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
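# A plain-PyTorch reading of triton_poi_fused_cat_0 above (my interpretation):
# it materializes torch.cat((emb, hid, enc), dim=1) by branching on the column
# index x0 within each 12-wide output row.
import torch

emb, hid, enc = torch.rand(4, 4), torch.rand(4, 4), torch.rand(4, 4)
ref = torch.cat((emb, hid, enc), dim=1).reshape(-1)  # (4, 12) flattened
for i in range(48):                                  # xnumel = 48
    x0, x1 = i % 12, i // 12
    if x0 < 4:
        src = emb[x1, x0]
    elif x0 < 8:
        src = hid[x1, x0 - 4]
    else:
        src = enc[x1, x0 - 8]
    assert ref[i] == src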
import torch
import torch.nn as nn


class PGenLayer(nn.Module):

    def __init__(self, emb_dim, hidden_size, enc_dim):
        super(PGenLayer, self).__init__()
        self.emb_dim = emb_dim
        self.hidden_size = hidden_size
        self.enc_dim = enc_dim
        self.lin = nn.Linear(self.emb_dim + self.hidden_size + self.
            enc_dim, 1)

    def forward(self, emb, hid, enc):
        """
        :param emb: (batch_size, emb_dim)
        :param hid: (batch_size, hid_dim)
        :param enc: (batch_size, enc_dim)
        """
        inputs = torch.cat((emb, hid, enc), 1)
        return torch.sigmoid(self.lin(inputs))  # torch.sigmoid: F.sigmoid is deprecated


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'emb_dim': 4, 'hidden_size': 4, 'enc_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (1, 12), (12, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(48)](primals_1, primals_2, primals_3, buf0, 48, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (12, 1), (1, 12), 0), out=buf1) del primals_4 buf2 = buf1 del buf1 triton_poi_fused_sigmoid_1[grid(4)](buf2, primals_5, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_5 return buf2, buf0, buf2 class PGenLayerNew(nn.Module): def __init__(self, emb_dim, hidden_size, enc_dim): super(PGenLayerNew, self).__init__() self.emb_dim = emb_dim self.hidden_size = hidden_size self.enc_dim = enc_dim self.lin = nn.Linear(self.emb_dim + self.hidden_size + self.enc_dim, 1) def forward(self, input_0, input_1, input_2): primals_4 = self.lin.weight primals_5 = self.lin.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
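# A minimal usage sketch, assuming the eager PGenLayer above is in scope: the
# generation gate is one scalar per batch row, squashed into (0, 1).
import torch

pgen = PGenLayer(emb_dim=4, hidden_size=4, enc_dim=4)
p = pgen(torch.rand(4, 4), torch.rand(4, 4), torch.rand(4, 4))
assert p.shape == (4, 1)
assert bool(((p > 0) & (p < 1)).all())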
gau820827/AI-writer_Data2Doc
PGenLayer
false
15406
[ "Apache-2.0" ]
77
6be0ee6238158a47aa0fdfa8a34df2a47714835a
https://github.com/gau820827/AI-writer_Data2Doc/tree/6be0ee6238158a47aa0fdfa8a34df2a47714835a
AMSoftmaxLoss
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/zk/czk5xfokmwnuegxn53eciq25366p2is3a6lxx47tlosf3q225vha.py # Topologically Sorted Source Nodes: [x_BxH], Original ATen: [aten.div] # Source node to ATen node mapping: # x_BxH => div_1 # Graph fragment: # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand_1), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), 
xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xe/cxewggzrfqe57dzglxrzfhfgpsywlh36utvtdulp5oi75wfs7ml3.py # Topologically Sorted Source Nodes: [W], Original ATen: [aten.div] # Source node to ATen node mapping: # W => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_3, %expand), kwargs = {}) triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/zq/czqoy42ufye2afnopzmn2loqzazgyudvxxmzemheojq4scu6j5lz.py # Topologically Sorted Source Nodes: [sub, numerator], Original ATen: [aten.sub, aten.mul] # Source node to ATen node mapping: # numerator => mul # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%diagonal, 0.4), kwargs = {}) # %mul : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 30.0), kwargs = {}) triton_poi_fused_mul_sub_2 = async_compile.triton('triton_poi_fused_mul_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sub_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (tmp4 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = 0.4 tmp8 = tmp6 - tmp7 tmp9 = 30.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_BxH], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [W], Original ATen: [aten.div] triton_poi_fused_div_1.run(primals_3, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [W, wf], Original ATen: [aten.div, aten.mm] extern_kernels.mm(buf0, buf1, out=buf2) del buf1 buf3 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [sub, numerator], Original ATen: [aten.sub, aten.mul] triton_poi_fused_mul_sub_2.run(primals_2, buf2, buf3, 4, grid=grid(4), stream=stream0) return (buf3, buf2, primals_2, primals_3, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): 
from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
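For orientation (editorial sketch): triton_poi_fused_mul_sub_2 above gathers wf[i, labels[i]] with a bounds check, then applies s * (x - m) using the hard-coded constants s=30.0 and m=0.4. In eager PyTorch the same numerator is a one-line advanced-indexing expression:

import torch

wf = torch.rand(4, 4)                        # cosine logits, shape (B, C)
labels_B = torch.ones(4, dtype=torch.int64)  # one label per row
numerator = 30.0 * (wf[torch.arange(4), labels_B] - 0.4)  # shape (B,)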
import torch
import torch.nn as nn
import torch.nn.functional as F


class AMSoftmaxLoss(nn.Module):

    def __init__(self, hidden_dim, speaker_num, s=30.0, m=0.4, **kwargs):
        """
        AM Softmax Loss
        """
        super(AMSoftmaxLoss, self).__init__()
        self.s = s
        self.m = m
        self.speaker_num = speaker_num
        self.W = torch.nn.Parameter(torch.randn(hidden_dim, speaker_num),
            requires_grad=True)
        nn.init.xavier_normal_(self.W, gain=1)

    def forward(self, x_BxH, labels_B):
        """
        x shape: (B, H)
        labels shape: (B)
        """
        assert len(x_BxH) == len(labels_B)
        assert torch.min(labels_B) >= 0
        assert torch.max(labels_B) < self.speaker_num
        W = F.normalize(self.W, dim=0)
        x_BxH = F.normalize(x_BxH, dim=1)
        wf = torch.mm(x_BxH, W)
        numerator = self.s * (torch.diagonal(wf.transpose(0, 1)[labels_B]) -
            self.m)
        excl = torch.cat([torch.cat((wf[i, :y], wf[i, y + 1:])).unsqueeze(0)
            for i, y in enumerate(labels_B)], dim=0)
        denominator = torch.exp(numerator) + torch.sum(torch.exp(self.s *
            excl), dim=1)
        L = numerator - torch.log(denominator)
        return -torch.mean(L)


def get_inputs():
    return [torch.rand([4, 4]), torch.ones([4], dtype=torch.int64)]


def get_init_inputs():
    return [[], {'hidden_dim': 4, 'speaker_num': 4}]
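The forward above takes some unpacking; as a sanity check (editorial sketch, using the standard AM-Softmax identity rather than anything from the repo), the same loss is ordinary cross-entropy over margin-adjusted cosine logits:

import torch
import torch.nn.functional as F

crit = AMSoftmaxLoss(hidden_dim=4, speaker_num=4)
x = torch.rand(4, 4)
y = torch.ones(4, dtype=torch.int64)

wf = F.normalize(x, dim=1) @ F.normalize(crit.W, dim=0)  # cosine similarities
logits = crit.s * (wf - crit.m * F.one_hot(y, num_classes=4))
assert torch.allclose(crit(x, y), F.cross_entropy(logits, y), atol=1e-6)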
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_mul_sub_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp7 = 0.4 tmp8 = tmp6 - tmp7 tmp9 = 30.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_1[grid(16)](primals_3, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) 
extern_kernels.mm(buf0, buf1, out=buf2) del buf1 buf3 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_sub_2[grid(4)](primals_2, buf2, buf3, 4, XBLOCK=4, num_warps=1, num_stages=1) return buf3, buf2, primals_2, primals_3, reinterpret_tensor(buf0, (4, 4 ), (1, 4), 0) class AMSoftmaxLossNew(nn.Module): def __init__(self, hidden_dim, speaker_num, s=30.0, m=0.4, **kwargs): """ AM Softmax Loss """ super(AMSoftmaxLossNew, self).__init__() self.s = s self.m = m self.speaker_num = speaker_num self.W = torch.nn.Parameter(torch.randn(hidden_dim, speaker_num), requires_grad=True) nn.init.xavier_normal_(self.W, gain=1) def forward(self, input_0, input_1): primals_1 = self.W primals_3 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
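A caveat on the extraction (editorial note): call() above only materializes the fused numerator; the per-row list comprehension over labels and the final log/mean from the eager forward were not captured by the trace, so the compiled class returns a per-sample vector rather than the scalar loss. A quick check (assumes a CUDA device):

import torch

crit = AMSoftmaxLossNew(hidden_dim=4, speaker_num=4).cuda()
x = torch.rand(4, 4, device='cuda')
y = torch.ones(4, dtype=torch.int64, device='cuda')
out = crit(x, y)
print(out.shape)  # torch.Size([4]): s * (wf[i, y_i] - m), not the mean loss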
gcambara/s3prl
AMSoftmaxLoss
false
15407
[ "MIT" ]
856
33284ebde3a903ed8604d6dae85669d0174ae1d3
https://github.com/gcambara/s3prl/tree/33284ebde3a903ed8604d6dae85669d0174ae1d3
TransformerDecoderBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/dk/cdk4odz276xorciau5ehgl7f3s2mgkf3hrye6xep6kzubczdeqqy.py # Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten.clone] # Source node to ATen node mapping: # attention_weights => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, 
None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bs/cbsluabtq7ll426nybkislhh3cajm6f7ggrxam362hohynwnvtk6.py # Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq] # Source node to ATen node mapping: # eq => eq # Graph fragment: # %eq : [num_users=2] = call_function[target=torch.ops.aten.eq.Scalar](args = (%primals_8, 0), kwargs = {}) triton_poi_fused_eq_1 = async_compile.triton('triton_poi_fused_eq_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_eq_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/iy/ciyhvyb7xizkj35n26blwkf3c4fzjrlfkn3hj5kfwzymzbhjnas4.py # Topologically Sorted Source Nodes: [scaled_attention_weights, scaled_attention_weights_1, scaled_attention_weights_2], Original ATen: [aten.div, aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # scaled_attention_weights => div # scaled_attention_weights_1 => full_default, where # scaled_attention_weights_2 => amax, exp, sub, sum_1 # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {}) # %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {}) # %sub : [num_users=1] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_div_masked_fill_2 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = float("-inf") tmp5 = tl.where(tmp0, tmp4, tmp3) tmp8 = tmp7 * tmp2 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tmp12 * tmp2 tmp14 = tl.where(tmp11, tmp4, tmp13) tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tmp17 * tmp2 tmp19 = tl.where(tmp16, tmp4, tmp18) tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + (x2), tmp20, xmask) tl.store(out_ptr1 + (x2), tmp31, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/xl/cxldkpg2u26hjvcnmrovwmvl5gxdgvlr6g7hjfh5mfzqsuhtv66f.py # Topologically Sorted Source Nodes: [scaled_attention_weights, scaled_attention_weights_1, scaled_attention_weights_2], Original ATen: [aten.div, aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # scaled_attention_weights => div # scaled_attention_weights_1 => full_default, where # scaled_attention_weights_2 => amax, div_1, exp, sub # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {}) # %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_div_masked_fill_3 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_div_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 64 x4 = xindex x5 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_out_ptr0 + (x4), xmask) tmp6 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = float("-inf") tmp5 = tl.where(tmp0, tmp4, tmp3) tmp7 = tmp5 - tmp6 tmp8 = 
tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tl.store(in_out_ptr0 + (x4), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py # Topologically Sorted Source Nodes: [attention_output], Original ATen: [aten.clone] # Source node to ATen node mapping: # attention_output => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6m/c6mhj5zwirfhy5e4o45uaeov72uwfby4udubpm2fcz42iqvs2g57.py # Topologically Sorted Source Nodes: [add, mha_layer_1_out_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # mha_layer_1_out_1 => var_mean # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math 
as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/iz/cizh7p23zwsiqbrt6dvrlvjzpyujwvyyaolptfk5xtby6foymiaz.py # Topologically Sorted Source Nodes: [add, mha_layer_1_out_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # mha_layer_1_out_1 => add_1, add_2, mul, mul_1, rsqrt, sub_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_11), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_12), kwargs = {}) triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/u6/cu6cquvlwzfwu4v7r3jxylwoleoxnkfo2k23kwu7m6nmrzu4lozu.py # Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add] # Source node to ATen node mapping: # add_1 => add_3 # Graph fragment: # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_35, %add_2), kwargs = {}) triton_poi_fused_add_7 = async_compile.triton('triton_poi_fused_add_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/m7/cm7alyvey7ehr4ipkugewxth4wd7hd3x37adkk5crh4vogw2ddrm.py # Topologically Sorted Source Nodes: [mha_layer_2_out_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # mha_layer_2_out_1 => add_4, rsqrt_1, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) triton_poi_fused_native_layer_norm_8 = async_compile.triton('triton_poi_fused_native_layer_norm_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/4d/c4dqmytly5zolisxgude22db5p3pbommvxualmvn6ercwtoppkm7.py # Topologically Sorted Source Nodes: [mha_layer_2_out_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # mha_layer_2_out_1 => add_4, add_5, mul_2, mul_3, rsqrt_1, sub_3, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %rsqrt_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_23), kwargs = {}) # %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_24), kwargs = {}) triton_poi_fused_native_layer_norm_9 = async_compile.triton('triton_poi_fused_native_layer_norm_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ol/coljc3cs7q6hio3b326qftbftlnedhni7mopq6hnmq3reufgrywy.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_37,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_10 = async_compile.triton('triton_poi_fused_relu_threshold_backward_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_10(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + 
tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4, ), (1, )) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4, ), (1, )) assert_size_stride(primals_17, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_18, (4, 4), (4, 1)) assert_size_stride(primals_19, (4, ), (1, )) assert_size_stride(primals_20, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_21, (4, 4), (4, 1)) assert_size_stride(primals_22, (4, ), (1, )) assert_size_stride(primals_23, (4, ), (1, )) assert_size_stride(primals_24, (4, ), (1, )) assert_size_stride(primals_25, (4, 4), (4, 1)) assert_size_stride(primals_26, (4, ), (1, )) assert_size_stride(primals_27, (4, 4), (4, 1)) assert_size_stride(primals_28, (4, ), (1, )) assert_size_stride(primals_29, (4, ), (1, )) assert_size_stride(primals_30, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: 
[attention_weights], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq] triton_poi_fused_eq_1.run(primals_8, buf6, 64, grid=grid(64), stream=stream0) del primals_8 buf7 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [scaled_attention_weights, scaled_attention_weights_1, scaled_attention_weights_2], Original ATen: [aten.div, aten.masked_fill, aten._softmax] triton_poi_fused__softmax_div_masked_fill_2.run(buf6, buf5, buf7, buf8, 64, grid=grid(64), stream=stream0) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [scaled_attention_weights, scaled_attention_weights_1, scaled_attention_weights_2], Original ATen: [aten.div, aten.masked_fill, aten._softmax] triton_poi_fused__softmax_div_masked_fill_3.run(buf9, buf6, buf7, buf8, 256, grid=grid(256), stream=stream0) buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [weighted_v], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf2, primals_7, buf10, 16, 4, grid=grid(16, 4), stream=stream0) del primals_7 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [weighted_v], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [attention_output], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [attention_output_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [add, mha_layer_1_out_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_5.run(buf13, primals_3, buf14, buf15, 16, grid=grid(16), stream=stream0) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, mha_layer_1_out_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_6.run(buf13, primals_3, buf14, buf15, primals_11, primals_12, buf16, 64, grid=grid(64), stream=stream0) del primals_12 buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf17) buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_17, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf18) del 
primals_15 buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_17, (16, 4), (4, 1), 0), reinterpret_tensor(primals_18, (4, 4), (1, 4), 0), out=buf19) del primals_18 buf20 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_weights_1], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf17, primals_14, buf20, 16, 4, grid=grid(16, 4), stream=stream0) del primals_14 buf21 = reinterpret_tensor(buf17, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf17 # reuse # Topologically Sorted Source Nodes: [attention_weights_1], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf18, primals_16, buf21, 16, 4, grid=grid(16, 4), stream=stream0) del primals_16 buf22 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_weights_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf20, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf21, (16, 1, 4), (4, 0, 1), 0), out=buf22) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [eq_1], Original ATen: [aten.eq] triton_poi_fused_eq_1.run(primals_20, buf23, 64, grid=grid(64), stream=stream0) del primals_20 buf24 = reinterpret_tensor(buf18, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf18 # reuse buf25 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [scaled_attention_weights_1, scaled_attention_weights_4, scaled_attention_weights_5, scaled_attention_weights_6], Original ATen: [aten.masked_fill, aten.div, aten._softmax] triton_poi_fused__softmax_div_masked_fill_2.run(buf23, buf22, buf24, buf25, 64, grid=grid(64), stream=stream0) buf26 = reinterpret_tensor(buf22, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf22 # reuse # Topologically Sorted Source Nodes: [scaled_attention_weights_1, scaled_attention_weights_4, scaled_attention_weights_5, scaled_attention_weights_6], Original ATen: [aten.masked_fill, aten.div, aten._softmax] triton_poi_fused__softmax_div_masked_fill_3.run(buf26, buf23, buf24, buf25, 256, grid=grid(256), stream=stream0) buf27 = reinterpret_tensor(buf25, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf25 # reuse # Topologically Sorted Source Nodes: [weighted_v_1], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf19, primals_19, buf27, 16, 4, grid=grid(16, 4), stream=stream0) del primals_19 buf28 = reinterpret_tensor(buf19, (16, 4, 1), (4, 1, 1), 0); del buf19 # reuse # Topologically Sorted Source Nodes: [weighted_v_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf26, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf27, (16, 4, 1), (4, 1, 0), 0), out=buf28) buf29 = reinterpret_tensor(buf24, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf24 # reuse # Topologically Sorted Source Nodes: [attention_output_3], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf28, buf29, 16, 4, grid=grid(16, 4), stream=stream0) buf30 = reinterpret_tensor(buf28, (16, 4), (4, 1), 0); del buf28 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf29, (16, 4), (4, 1), 0), reinterpret_tensor(primals_21, (4, 4), (1, 4), 0), out=buf30) buf31 = reinterpret_tensor(buf30, (4, 4, 4), (16, 4, 1), 0); del buf30 # reuse # Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add] triton_poi_fused_add_7.run(buf31, primals_22, buf16, 64, grid=grid(64), 
stream=stream0) del primals_22 buf32 = buf15; del buf15 # reuse buf33 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [mha_layer_2_out_1], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_8.run(buf31, buf32, buf33, 16, grid=grid(16), stream=stream0) buf34 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mha_layer_2_out_1], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_9.run(buf31, buf32, buf33, primals_23, primals_24, buf34, 64, grid=grid(64), stream=stream0) del primals_24 buf35 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf34, (16, 4), (4, 1), 0), reinterpret_tensor(primals_25, (4, 4), (1, 4), 0), out=buf35) buf36 = reinterpret_tensor(buf35, (4, 4, 4), (16, 4, 1), 0); del buf35 # reuse buf42 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_10.run(buf36, primals_26, buf42, 64, grid=grid(64), stream=stream0) del primals_26 buf37 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf36, (16, 4), (4, 1), 0), reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), out=buf37) buf38 = reinterpret_tensor(buf37, (4, 4, 4), (16, 4, 1), 0); del buf37 # reuse # Topologically Sorted Source Nodes: [add_2], Original ATen: [aten.add] triton_poi_fused_add_7.run(buf38, primals_28, buf34, 64, grid=grid(64), stream=stream0) del primals_28 buf39 = buf33; del buf33 # reuse buf40 = buf32; del buf32 # reuse # Topologically Sorted Source Nodes: [ffn_out_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_8.run(buf38, buf39, buf40, 16, grid=grid(16), stream=stream0) buf41 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [ffn_out_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_9.run(buf38, buf39, buf40, primals_29, primals_30, buf41, 64, grid=grid(64), stream=stream0) del buf39 del buf40 del primals_30 return (buf41, primals_3, primals_11, primals_23, primals_29, buf6, buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_17, (16, 4), (4, 1), 0), buf23, buf26, reinterpret_tensor(buf29, (16, 4), (4, 1), 0), buf31, reinterpret_tensor(buf34, (16, 4), (4, 1), 0), reinterpret_tensor(buf36, (16, 4), (4, 1), 0), buf38, primals_27, buf42, primals_25, primals_21, reinterpret_tensor(buf27, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf20, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf21, (16, 4, 1), (4, 1, 4), 0), primals_13, primals_9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = 
rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
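For reference, a minimal eager-mode sketch of the math that the fused kernel pair triton_poi_fused__softmax_div_masked_fill_2 and _3 computes: the first pass produces the row-wise max and exp-sum of the masked, scaled scores, and the second pass normalizes in place. With d_model=4 and n_heads=4, d_head is 1, so the 1/sqrt(d_head) scale folds into the literal constant 1.0 visible in those kernels. The shapes and the causal mask below are illustrative assumptions, not values taken from the repository.

import math
import torch

def masked_softmax_reference(scores, mask, d_head=1):
    # d_head = d_model // n_heads = 4 // 4 = 1 in this row's config, so the
    # scale below is a no-op, matching the literal 1.0 in the fused kernels.
    scaled = scores / math.sqrt(d_head)
    scaled = scaled.masked_fill(mask == 0, float('-inf'))
    row_max = scaled.amax(dim=-1, keepdim=True)   # pass 1: row-wise max
    exp = torch.exp(scaled - row_max)
    denom = exp.sum(dim=-1, keepdim=True)         # pass 1: exp-sum per row
    return exp / denom                            # pass 2: in-place normalize

scores = torch.randn(4, 4, 4, 4)                  # (batch, heads, seq, seq)
mask = torch.tril(torch.ones(4, 4))               # causal mask; every row keeps >= 1 key
expected = torch.softmax(scores.masked_fill(mask == 0, float('-inf')), dim=-1)
assert torch.allclose(masked_softmax_reference(scores, mask), expected, atol=1e-6)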
import math import torch import torch.nn as nn class AddAndNorm(nn.Module): def __init__(self, d_model): super(AddAndNorm, self).__init__() self.layer_norm = nn.LayerNorm(d_model) def forward(self, x, residual): return self.layer_norm(x + residual) class ScaledDotProductAttention(nn.Module): def __init__(self, d_head): super(ScaledDotProductAttention, self).__init__() self.d_head = d_head self.attention_dropout = nn.Dropout(p=0.1) def forward(self, q, k, v, mask=None): attention_weights = torch.matmul(q, k.transpose(-2, -1)) scaled_attention_weights = attention_weights / math.sqrt(self.d_head) if mask is not None: scaled_attention_weights = scaled_attention_weights.masked_fill( mask == 0, float('-inf')) scaled_attention_weights = nn.functional.softmax( scaled_attention_weights, dim=-1) scaled_attention_weights = self.attention_dropout( scaled_attention_weights) weighted_v = torch.matmul(scaled_attention_weights, v) return weighted_v class MultiHeadAttention(nn.Module): def __init__(self, d_model, n_heads): super(MultiHeadAttention, self).__init__() self.n_heads = n_heads assert d_model % n_heads == 0 self.d_head = d_model // n_heads self.dot_product_attention_layer = ScaledDotProductAttention(self. d_head) self.W_0 = nn.Linear(d_model, d_model) def _split_into_heads(self, q, k, v): q = q.view(q.size(0), q.size(1), self.n_heads, self.d_head) k = k.view(k.size(0), k.size(1), self.n_heads, self.d_head) v = v.view(v.size(0), v.size(1), self.n_heads, self.d_head) q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) return q, k, v def _concatenate_heads(self, attention_output): attention_output = attention_output.transpose(1, 2).contiguous() attention_output = attention_output.view(attention_output.size(0), attention_output.size(1), -1) return attention_output def forward(self, q, k, v, mask=None): q, k, v = self._split_into_heads(q, k, v) attention_output = self.dot_product_attention_layer(q, k, v, mask) attention_output = self._concatenate_heads(attention_output) attention_output = self.W_0(attention_output) return attention_output class PositionWiseFeedForwardNet(nn.Module): def __init__(self, d_model, d_ff): super(PositionWiseFeedForwardNet, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.dropout = nn.Dropout(0.1) def forward(self, x): return self.w_2(self.dropout(torch.relu(self.w_1(x)))) class TransformerDecoderBlock(nn.Module): def __init__(self, d_model, n_heads, d_ff, dropout_proba): super(TransformerDecoderBlock, self).__init__() self.W_q_1 = nn.Linear(d_model, d_model) self.W_k_1 = nn.Linear(d_model, d_model) self.W_v_1 = nn.Linear(d_model, d_model) self.mha_layer_1 = MultiHeadAttention(d_model, n_heads) self.dropout_layer_1 = nn.Dropout(dropout_proba) self.add_and_norm_1 = AddAndNorm(d_model) self.W_q_2 = nn.Linear(d_model, d_model) self.W_k_2 = nn.Linear(d_model, d_model) self.W_v_2 = nn.Linear(d_model, d_model) self.mha_layer_2 = MultiHeadAttention(d_model, n_heads) self.dropout_layer_2 = nn.Dropout(dropout_proba) self.add_and_norm_2 = AddAndNorm(d_model) self.ffn_layer = PositionWiseFeedForwardNet(d_model, d_ff) self.dropout_layer_3 = nn.Dropout(dropout_proba) self.add_and_norm_3 = AddAndNorm(d_model) def forward(self, x, encoder_output, src_mask, trg_mask): q_1 = self.W_q_1(x) k_1 = self.W_k_1(x) v_1 = self.W_v_1(x) mha_layer_1_out = self.mha_layer_1(q_1, k_1, v_1, trg_mask) mha_layer_1_out = self.dropout_layer_1(mha_layer_1_out) mha_layer_1_out = self.add_and_norm_1(mha_layer_1_out, x) q_2 = self.W_q_2(mha_layer_1_out) 
k_2 = self.W_k_2(encoder_output) v_2 = self.W_v_2(encoder_output) mha_layer_2_out = self.mha_layer_2(q_2, k_2, v_2, src_mask) mha_layer_2_out = self.dropout_layer_2(mha_layer_2_out) mha_layer_2_out = self.add_and_norm_2(mha_layer_2_out, mha_layer_1_out) ffn_out = self.ffn_layer(mha_layer_2_out) ffn_out = self.dropout_layer_3(ffn_out) ffn_out = self.add_and_norm_3(ffn_out, mha_layer_2_out) return ffn_out def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'n_heads': 4, 'd_ff': 4, 'dropout_proba': 0.5}]
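A minimal usage sketch, assuming the helpers above are the intended entry points: get_init_inputs() supplies the constructor arguments and get_inputs() supplies x, encoder_output, src_mask, and trg_mask in that order. eval() is called so the three dropout layers are inactive; the random masks are purely illustrative.

init_args, init_kwargs = get_init_inputs()
block = TransformerDecoderBlock(*init_args, **init_kwargs)
block.eval()  # disable dropout for a deterministic forward pass
x, encoder_output, src_mask, trg_mask = get_inputs()
out = block(x, encoder_output, src_mask, trg_mask)
print(out.shape)  # torch.Size([4, 4, 4])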
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_eq_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = float('-inf') tmp5 = tl.where(tmp0, tmp4, tmp3) tmp8 = tmp7 * tmp2 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tmp12 * tmp2 tmp14 = tl.where(tmp11, tmp4, tmp13) tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tmp17 * tmp2 tmp19 = tl.where(tmp16, tmp4, tmp18) tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + x2, tmp20, xmask) tl.store(out_ptr1 + x2, tmp31, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 64 x4 = xindex x5 = xindex // 4 tmp0 = tl.load(in_ptr0 + x3, 
xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_out_ptr0 + x4, xmask) tmp6 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = float('-inf') tmp5 = tl.where(tmp0, tmp4, tmp3) tmp7 = tmp5 - tmp6 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tl.store(in_out_ptr0 + x4, tmp10, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = 
tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_10(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) 
assert_size_stride(primals_17, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_18, (4, 4), (4, 1)) assert_size_stride(primals_19, (4,), (1,)) assert_size_stride(primals_20, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_21, (4, 4), (4, 1)) assert_size_stride(primals_22, (4,), (1,)) assert_size_stride(primals_23, (4,), (1,)) assert_size_stride(primals_24, (4,), (1,)) assert_size_stride(primals_25, (4, 4), (4, 1)) assert_size_stride(primals_26, (4,), (1,)) assert_size_stride(primals_27, (4, 4), (4, 1)) assert_size_stride(primals_28, (4,), (1,)) assert_size_stride(primals_29, (4,), (1,)) assert_size_stride(primals_30, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_eq_1[grid(64)](primals_8, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_8 buf7 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_div_masked_fill_2[grid(64)](buf6, buf5, buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_div_masked_fill_3[grid(256)](buf9, buf6, buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf8 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf10, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) 
triton_poi_fused_add_native_layer_norm_5[grid(16)](buf13, primals_3, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(64)](buf13, primals_3, buf14, buf15, primals_11, primals_12, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf17) buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_17, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf18) del primals_15 buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_17, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_18, (4, 4), (1, 4), 0), out=buf19) del primals_18 buf20 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_0[grid(16, 4)](buf17, primals_14, buf20, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_14 buf21 = reinterpret_tensor(buf17, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf17 triton_poi_fused_clone_0[grid(16, 4)](buf18, primals_16, buf21, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_16 buf22 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf20, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf21, (16, 1, 4), (4, 0, 1), 0), out=buf22) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_eq_1[grid(64)](primals_20, buf23, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_20 buf24 = reinterpret_tensor(buf18, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf18 buf25 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_div_masked_fill_2[grid(64)](buf23, buf22, buf24, buf25, 64, XBLOCK=64, num_warps=1, num_stages=1) buf26 = reinterpret_tensor(buf22, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf22 triton_poi_fused__softmax_div_masked_fill_3[grid(256)](buf26, buf23, buf24, buf25, 256, XBLOCK=128, num_warps=4, num_stages=1) buf27 = reinterpret_tensor(buf25, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf25 triton_poi_fused_clone_0[grid(16, 4)](buf19, primals_19, buf27, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_19 buf28 = reinterpret_tensor(buf19, (16, 4, 1), (4, 1, 1), 0) del buf19 extern_kernels.bmm(reinterpret_tensor(buf26, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf27, (16, 4, 1), (4, 1, 0), 0), out=buf28) buf29 = reinterpret_tensor(buf24, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf24 triton_poi_fused_clone_4[grid(16, 4)](buf28, buf29, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf30 = reinterpret_tensor(buf28, (16, 4), (4, 1), 0) del buf28 extern_kernels.mm(reinterpret_tensor(buf29, (16, 4), (4, 1), 0), reinterpret_tensor(primals_21, (4, 4), (1, 4), 0), out=buf30) buf31 = reinterpret_tensor(buf30, (4, 4, 4), (16, 4, 1), 0) del buf30 triton_poi_fused_add_7[grid(64)](buf31, primals_22, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_22 buf32 = buf15 del buf15 buf33 = buf14 del buf14 triton_poi_fused_native_layer_norm_8[grid(16)](buf31, buf32, buf33, 16, XBLOCK=16, num_warps=1, num_stages=1) buf34 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_9[grid(64)](buf31, buf32, buf33, primals_23, primals_24, buf34, 64, XBLOCK=64, num_warps=1, 
num_stages=1) del primals_24 buf35 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf34, (16, 4), (4, 1), 0), reinterpret_tensor(primals_25, (4, 4), (1, 4), 0), out=buf35) buf36 = reinterpret_tensor(buf35, (4, 4, 4), (16, 4, 1), 0) del buf35 buf42 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_10[grid(64)](buf36, primals_26, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_26 buf37 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf36, (16, 4), (4, 1), 0), reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), out=buf37) buf38 = reinterpret_tensor(buf37, (4, 4, 4), (16, 4, 1), 0) del buf37 triton_poi_fused_add_7[grid(64)](buf38, primals_28, buf34, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_28 buf39 = buf33 del buf33 buf40 = buf32 del buf32 triton_poi_fused_native_layer_norm_8[grid(16)](buf38, buf39, buf40, 16, XBLOCK=16, num_warps=1, num_stages=1) buf41 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_9[grid(64)](buf38, buf39, buf40, primals_29, primals_30, buf41, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf39 del buf40 del primals_30 return (buf41, primals_3, primals_11, primals_23, primals_29, buf6, buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor( primals_17, (16, 4), (4, 1), 0), buf23, buf26, reinterpret_tensor( buf29, (16, 4), (4, 1), 0), buf31, reinterpret_tensor(buf34, (16, 4 ), (4, 1), 0), reinterpret_tensor(buf36, (16, 4), (4, 1), 0), buf38, primals_27, buf42, primals_25, primals_21, reinterpret_tensor(buf27, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf20, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf21, (16, 4, 1), (4, 1, 4), 0), primals_13, primals_9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)) class AddAndNorm(nn.Module): def __init__(self, d_model): super(AddAndNorm, self).__init__() self.layer_norm = nn.LayerNorm(d_model) def forward(self, x, residual): return self.layer_norm(x + residual) class ScaledDotProductAttention(nn.Module): def __init__(self, d_head): super(ScaledDotProductAttention, self).__init__() self.d_head = d_head self.attention_dropout = nn.Dropout(p=0.1) def forward(self, q, k, v, mask=None): attention_weights = torch.matmul(q, k.transpose(-2, -1)) scaled_attention_weights = attention_weights / math.sqrt(self.d_head) if mask is not None: scaled_attention_weights = scaled_attention_weights.masked_fill( mask == 0, float('-inf')) scaled_attention_weights = nn.functional.softmax( scaled_attention_weights, dim=-1) scaled_attention_weights = self.attention_dropout( scaled_attention_weights) weighted_v = torch.matmul(scaled_attention_weights, v) return weighted_v class MultiHeadAttention(nn.Module): def __init__(self, d_model, n_heads): super(MultiHeadAttention, self).__init__() self.n_heads = n_heads assert d_model % n_heads == 0 self.d_head = d_model // n_heads self.dot_product_attention_layer = ScaledDotProductAttention(self. 
d_head) self.W_0 = nn.Linear(d_model, d_model) def _split_into_heads(self, q, k, v): q = q.view(q.size(0), q.size(1), self.n_heads, self.d_head) k = k.view(k.size(0), k.size(1), self.n_heads, self.d_head) v = v.view(v.size(0), v.size(1), self.n_heads, self.d_head) q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) return q, k, v def _concatenate_heads(self, attention_output): attention_output = attention_output.transpose(1, 2).contiguous() attention_output = attention_output.view(attention_output.size(0), attention_output.size(1), -1) return attention_output def forward(self, q, k, v, mask=None): q, k, v = self._split_into_heads(q, k, v) attention_output = self.dot_product_attention_layer(q, k, v, mask) attention_output = self._concatenate_heads(attention_output) attention_output = self.W_0(attention_output) return attention_output class PositionWiseFeedForwardNet(nn.Module): def __init__(self, d_model, d_ff): super(PositionWiseFeedForwardNet, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.dropout = nn.Dropout(0.1) def forward(self, x): return self.w_2(self.dropout(torch.relu(self.w_1(x)))) class TransformerDecoderBlockNew(nn.Module): def __init__(self, d_model, n_heads, d_ff, dropout_proba): super(TransformerDecoderBlockNew, self).__init__() self.W_q_1 = nn.Linear(d_model, d_model) self.W_k_1 = nn.Linear(d_model, d_model) self.W_v_1 = nn.Linear(d_model, d_model) self.mha_layer_1 = MultiHeadAttention(d_model, n_heads) self.dropout_layer_1 = nn.Dropout(dropout_proba) self.add_and_norm_1 = AddAndNorm(d_model) self.W_q_2 = nn.Linear(d_model, d_model) self.W_k_2 = nn.Linear(d_model, d_model) self.W_v_2 = nn.Linear(d_model, d_model) self.mha_layer_2 = MultiHeadAttention(d_model, n_heads) self.dropout_layer_2 = nn.Dropout(dropout_proba) self.add_and_norm_2 = AddAndNorm(d_model) self.ffn_layer = PositionWiseFeedForwardNet(d_model, d_ff) self.dropout_layer_3 = nn.Dropout(dropout_proba) self.add_and_norm_3 = AddAndNorm(d_model) def forward(self, input_0, input_1, input_2, input_3): primals_1 = self.W_q_1.weight primals_2 = self.W_q_1.bias primals_4 = self.W_k_1.weight primals_5 = self.W_k_1.bias primals_6 = self.W_v_1.weight primals_7 = self.W_v_1.bias primals_9 = self.mha_layer_1.W_0.weight primals_10 = self.mha_layer_1.W_0.bias primals_11 = self.add_and_norm_1.layer_norm.weight primals_12 = self.add_and_norm_1.layer_norm.bias primals_13 = self.W_q_2.weight primals_14 = self.W_q_2.bias primals_15 = self.W_k_2.weight primals_16 = self.W_k_2.bias primals_18 = self.W_v_2.weight primals_19 = self.W_v_2.bias primals_21 = self.mha_layer_2.W_0.weight primals_22 = self.mha_layer_2.W_0.bias primals_23 = self.add_and_norm_2.layer_norm.weight primals_24 = self.add_and_norm_2.layer_norm.bias primals_25 = self.ffn_layer.w_1.weight primals_26 = self.ffn_layer.w_1.bias primals_27 = self.ffn_layer.w_2.weight primals_28 = self.ffn_layer.w_2.bias primals_29 = self.add_and_norm_3.layer_norm.weight primals_30 = self.add_and_norm_3.layer_norm.bias primals_3 = input_0 primals_8 = input_1 primals_17 = input_2 primals_20 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30]) return output[0]
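A smoke-test sketch, assuming CUDA is available (every kernel above is compiled for 'cuda'): TransformerDecoderBlockNew keeps the original parameter layout but routes forward() through the fused call(). Note that the traced graph contains no dropout ops, so it corresponds to inference-mode behavior regardless of dropout_proba.

if torch.cuda.is_available():
    block = TransformerDecoderBlockNew(d_model=4, n_heads=4, d_ff=4,
                                       dropout_proba=0.5).cuda()
    x, enc_out, src_mask, trg_mask = (torch.rand(4, 4, 4, device='cuda')
                                      for _ in range(4))
    out = block(x, enc_out, src_mask, trg_mask)
    print(out.shape)  # torch.Size([4, 4, 4])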
francismontalbo/attention-is-all-you-need-paper
TransformerDecoderBlock
false
15408
[ "MIT" ]
167
21ba3e48917da0c6808126d183bece6a9969cfd2
https://github.com/francismontalbo/attention-is-all-you-need-paper/tree/21ba3e48917da0c6808126d183bece6a9969cfd2
TransformerEncoderBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/dk/cdk4odz276xorciau5ehgl7f3s2mgkf3hrye6xep6kzubczdeqqy.py # Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten.clone] # Source node to ATen node mapping: # attention_weights => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, 
None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bs/cbsluabtq7ll426nybkislhh3cajm6f7ggrxam362hohynwnvtk6.py # Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq] # Source node to ATen node mapping: # eq => eq # Graph fragment: # %eq : [num_users=2] = call_function[target=torch.ops.aten.eq.Scalar](args = (%primals_8, 0), kwargs = {}) triton_poi_fused_eq_1 = async_compile.triton('triton_poi_fused_eq_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_eq_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/iy/ciyhvyb7xizkj35n26blwkf3c4fzjrlfkn3hj5kfwzymzbhjnas4.py # Topologically Sorted Source Nodes: [scaled_attention_weights, scaled_attention_weights_1, scaled_attention_weights_2], Original ATen: [aten.div, aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # scaled_attention_weights => div # scaled_attention_weights_1 => full_default, where # scaled_attention_weights_2 => amax, exp, sub, sum_1 # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {}) # %sub : [num_users=1] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_div_masked_fill_2 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = float("-inf") tmp5 = tl.where(tmp0, tmp4, tmp3) tmp8 = tmp7 * tmp2 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tmp12 * tmp2 tmp14 = tl.where(tmp11, tmp4, tmp13) tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tmp17 * tmp2 tmp19 = tl.where(tmp16, tmp4, tmp18) tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + (x2), tmp20, xmask) tl.store(out_ptr1 + (x2), tmp31, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/xl/cxldkpg2u26hjvcnmrovwmvl5gxdgvlr6g7hjfh5mfzqsuhtv66f.py # Topologically Sorted Source Nodes: [scaled_attention_weights, scaled_attention_weights_1, scaled_attention_weights_2], Original ATen: [aten.div, aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # scaled_attention_weights => div # scaled_attention_weights_1 => full_default, where # scaled_attention_weights_2 => amax, div_1, exp, sub # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_div_masked_fill_3 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_div_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 64 x4 = xindex x5 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_out_ptr0 + (x4), xmask) tmp6 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = float("-inf") tmp5 = tl.where(tmp0, tmp4, tmp3) tmp7 = tmp5 - tmp6 tmp8 = 
tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tl.store(in_out_ptr0 + (x4), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py # Topologically Sorted Source Nodes: [attention_output], Original ATen: [aten.clone] # Source node to ATen node mapping: # attention_output => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6m/c6mhj5zwirfhy5e4o45uaeov72uwfby4udubpm2fcz42iqvs2g57.py # Topologically Sorted Source Nodes: [add, mha_out_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # mha_out_1 => var_mean # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %view_17), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/iz/cizh7p23zwsiqbrt6dvrlvjzpyujwvyyaolptfk5xtby6foymiaz.py # Topologically Sorted Source Nodes: [add, mha_out_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # mha_out_1 => add_1, add_2, mul, mul_1, rsqrt, sub_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %view_17), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 
%primals_11), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_12), kwargs = {}) triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/u4/cu4mvhweewrefdurxuza5qfbqlwomkc67kmxkkaurh6luaf2e2fz.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_19,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_7 = async_compile.triton('triton_poi_fused_relu_threshold_backward_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/he/chevf4d6tadiz3y2a2abr2lj2bvo3wyfykoivwj2s4xedp3vdjuf.py # Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add] # Source node to ATen node mapping: # add_1 => add_3 # Graph fragment: # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %view_21), kwargs = {}) triton_poi_fused_add_8 = async_compile.triton('triton_poi_fused_add_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit 
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/hn/chnyp4bqchi6cc3qkpikodtjzt7sfs4gz3r2kunqaesb7ahrywso.py # Topologically Sorted Source Nodes: [ffn_out_2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # ffn_out_2 => add_4, rsqrt_1, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) triton_poi_fused_native_layer_norm_9 = async_compile.triton('triton_poi_fused_native_layer_norm_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 
tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/al/cal3txxjlyumb2wxf6pzsp7g5yvv5ygiluv6ygjjzldvb2woph4t.py # Topologically Sorted Source Nodes: [ffn_out_2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # ffn_out_2 => add_4, add_5, mul_2, mul_3, rsqrt_1, sub_2, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_17), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_18), kwargs = {}) triton_poi_fused_native_layer_norm_10 = async_compile.triton('triton_poi_fused_native_layer_norm_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = 
tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4, ), (1, )) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4, ), (1, )) assert_size_stride(primals_17, (4, ), (1, )) assert_size_stride(primals_18, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq] triton_poi_fused_eq_1.run(primals_8, buf6, 64, grid=grid(64), stream=stream0) del primals_8 buf7 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [scaled_attention_weights, scaled_attention_weights_1, scaled_attention_weights_2], Original ATen: [aten.div, aten.masked_fill, aten._softmax] triton_poi_fused__softmax_div_masked_fill_2.run(buf6, buf5, buf7, buf8, 64, grid=grid(64), stream=stream0) buf9 = 
reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [scaled_attention_weights, scaled_attention_weights_1, scaled_attention_weights_2], Original ATen: [aten.div, aten.masked_fill, aten._softmax] triton_poi_fused__softmax_div_masked_fill_3.run(buf9, buf6, buf7, buf8, 256, grid=grid(256), stream=stream0) buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [weighted_v], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf2, primals_7, buf10, 16, 4, grid=grid(16, 4), stream=stream0) del primals_7 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [weighted_v], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [attention_output], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [attention_output_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [add, mha_out_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_5.run(primals_3, buf13, buf14, buf15, 16, grid=grid(16), stream=stream0) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, mha_out_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_6.run(primals_3, buf13, buf14, buf15, primals_11, primals_12, buf16, 64, grid=grid(64), stream=stream0) del primals_12 buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf17) buf18 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0); del buf17 # reuse buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_7.run(buf18, primals_14, buf24, 64, grid=grid(64), stream=stream0) del primals_14 buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19) buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0); del buf19 # reuse # Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add] triton_poi_fused_add_8.run(buf20, buf16, primals_16, 64, grid=grid(64), stream=stream0) del primals_16 buf21 = buf15; del buf15 # reuse buf22 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [ffn_out_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_9.run(buf20, buf21, buf22, 16, grid=grid(16), stream=stream0) buf23 = 
empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [ffn_out_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_10.run(buf20, buf21, buf22, primals_17, primals_18, buf23, 64, grid=grid(64), stream=stream0) del buf21 del buf22 del primals_18 return (buf23, primals_3, primals_11, primals_17, buf6, buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(buf18, (16, 4), (4, 1), 0), buf20, primals_15, buf24, primals_13, primals_9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
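For readability, here is a minimal eager-mode sketch (not part of the generated output; the names masked_softmax_reference, scores and mask are mine) of what triton_poi_fused__softmax_div_masked_fill_2 and _3 jointly compute: a numerically stable masked softmax over the last dimension. The 1/sqrt(d_head) scale shows up in the kernels as multiplication by 1.0 because this instantiation uses d_model=4 with n_heads=4, hence d_head=1.

import torch

def masked_softmax_reference(scores: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    # Kernel _2 produces the row-wise max (out_ptr0) and the sum of
    # exponentials of the masked scores (out_ptr1); kernel _3 then
    # normalizes every element in place (tmp10 = tmp8 / tmp9).
    scores = scores.masked_fill(mask == 0, float('-inf'))
    row_max = scores.amax(dim=-1, keepdim=True)
    exp = torch.exp(scores - row_max)
    return exp / exp.sum(dim=-1, keepdim=True)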
import math import torch import torch.nn as nn class AddAndNorm(nn.Module): def __init__(self, d_model): super(AddAndNorm, self).__init__() self.layer_norm = nn.LayerNorm(d_model) def forward(self, x, residual): return self.layer_norm(x + residual) class ScaledDotProductAttention(nn.Module): def __init__(self, d_head): super(ScaledDotProductAttention, self).__init__() self.d_head = d_head self.attention_dropout = nn.Dropout(p=0.1) def forward(self, q, k, v, mask=None): attention_weights = torch.matmul(q, k.transpose(-2, -1)) scaled_attention_weights = attention_weights / math.sqrt(self.d_head) if mask is not None: scaled_attention_weights = scaled_attention_weights.masked_fill( mask == 0, float('-inf')) scaled_attention_weights = nn.functional.softmax( scaled_attention_weights, dim=-1) scaled_attention_weights = self.attention_dropout( scaled_attention_weights) weighted_v = torch.matmul(scaled_attention_weights, v) return weighted_v class MultiHeadAttention(nn.Module): def __init__(self, d_model, n_heads): super(MultiHeadAttention, self).__init__() self.n_heads = n_heads assert d_model % n_heads == 0 self.d_head = d_model // n_heads self.dot_product_attention_layer = ScaledDotProductAttention(self. d_head) self.W_0 = nn.Linear(d_model, d_model) def _split_into_heads(self, q, k, v): q = q.view(q.size(0), q.size(1), self.n_heads, self.d_head) k = k.view(k.size(0), k.size(1), self.n_heads, self.d_head) v = v.view(v.size(0), v.size(1), self.n_heads, self.d_head) q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) return q, k, v def _concatenate_heads(self, attention_output): attention_output = attention_output.transpose(1, 2).contiguous() attention_output = attention_output.view(attention_output.size(0), attention_output.size(1), -1) return attention_output def forward(self, q, k, v, mask=None): q, k, v = self._split_into_heads(q, k, v) attention_output = self.dot_product_attention_layer(q, k, v, mask) attention_output = self._concatenate_heads(attention_output) attention_output = self.W_0(attention_output) return attention_output class PositionWiseFeedForwardNet(nn.Module): def __init__(self, d_model, d_ff): super(PositionWiseFeedForwardNet, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.dropout = nn.Dropout(0.1) def forward(self, x): return self.w_2(self.dropout(torch.relu(self.w_1(x)))) class TransformerEncoderBlock(nn.Module): def __init__(self, d_model, n_heads, d_ff, dropout_proba): super(TransformerEncoderBlock, self).__init__() self.W_q = nn.Linear(d_model, d_model) self.W_k = nn.Linear(d_model, d_model) self.W_v = nn.Linear(d_model, d_model) self.mha_layer = MultiHeadAttention(d_model, n_heads) self.dropout_layer_1 = nn.Dropout(dropout_proba) self.add_and_norm_layer_1 = AddAndNorm(d_model) self.ffn_layer = PositionWiseFeedForwardNet(d_model, d_ff) self.dropout_layer_2 = nn.Dropout(dropout_proba) self.add_and_norm_layer_2 = AddAndNorm(d_model) def forward(self, x, mask): q = self.W_q(x) k = self.W_k(x) v = self.W_v(x) mha_out = self.mha_layer(q, k, v, mask) mha_out = self.dropout_layer_1(mha_out) mha_out = self.add_and_norm_layer_1(x, mha_out) ffn_out = self.ffn_layer(mha_out) ffn_out = self.dropout_layer_2(ffn_out) ffn_out = self.add_and_norm_layer_2(mha_out, ffn_out) return ffn_out def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'n_heads': 4, 'd_ff': 4, 'dropout_proba': 0.5}]
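A hypothetical driver for the reference module above, mirroring the shapes from get_inputs() and get_init_inputs() (batch=4, seq_len=4, d_model=4); eval() is used so the two dropout layers are identities:

import torch

block = TransformerEncoderBlock(d_model=4, n_heads=4, d_ff=4, dropout_proba=0.5)
block.eval()                 # dropout off, so the forward pass is deterministic
x = torch.rand(4, 4, 4)      # (batch, seq_len, d_model)
mask = torch.ones(4, 4, 4)   # zero entries would be masked out in the attention
out = block(x, mask)         # -> torch.Size([4, 4, 4])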
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_eq_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = float('-inf') tmp5 = tl.where(tmp0, tmp4, tmp3) tmp8 = tmp7 * tmp2 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tmp12 * tmp2 tmp14 = tl.where(tmp11, tmp4, tmp13) tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tmp17 * tmp2 tmp19 = tl.where(tmp16, tmp4, tmp18) tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + x2, tmp20, xmask) tl.store(out_ptr1 + x2, tmp31, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 64 x4 = xindex x5 = xindex // 4 tmp0 = tl.load(in_ptr0 + x3, 
xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_out_ptr0 + x4, xmask) tmp6 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = float('-inf') tmp5 = tl.where(tmp0, tmp4, tmp3) tmp7 = tmp5 - tmp6 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tl.store(in_out_ptr0 + x4, tmp10, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 ) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4,), (1,)) assert_size_stride(primals_18, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = 
empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_eq_1[grid(64)](primals_8, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_8 buf7 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_div_masked_fill_2[grid(64)](buf6, buf5, buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_div_masked_fill_3[grid(256)](buf9, buf6, buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf8 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf10, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_3, buf13, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_3, buf13, buf14, buf15, primals_11, primals_12, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf17) buf18 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0) del buf17 buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_7[grid(64)](buf18, primals_14, 
buf24, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_14 buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19) buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0) del buf19 triton_poi_fused_add_8[grid(64)](buf20, buf16, primals_16, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_16 buf21 = buf15 del buf15 buf22 = buf14 del buf14 triton_poi_fused_native_layer_norm_9[grid(16)](buf20, buf21, buf22, 16, XBLOCK=16, num_warps=1, num_stages=1) buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_10[grid(64)](buf20, buf21, buf22, primals_17, primals_18, buf23, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf21 del buf22 del primals_18 return (buf23, primals_3, primals_11, primals_17, buf6, buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor( buf18, (16, 4), (4, 1), 0), buf20, primals_15, buf24, primals_13, primals_9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)) class AddAndNorm(nn.Module): def __init__(self, d_model): super(AddAndNorm, self).__init__() self.layer_norm = nn.LayerNorm(d_model) def forward(self, x, residual): return self.layer_norm(x + residual) class ScaledDotProductAttention(nn.Module): def __init__(self, d_head): super(ScaledDotProductAttention, self).__init__() self.d_head = d_head self.attention_dropout = nn.Dropout(p=0.1) def forward(self, q, k, v, mask=None): attention_weights = torch.matmul(q, k.transpose(-2, -1)) scaled_attention_weights = attention_weights / math.sqrt(self.d_head) if mask is not None: scaled_attention_weights = scaled_attention_weights.masked_fill( mask == 0, float('-inf')) scaled_attention_weights = nn.functional.softmax( scaled_attention_weights, dim=-1) scaled_attention_weights = self.attention_dropout( scaled_attention_weights) weighted_v = torch.matmul(scaled_attention_weights, v) return weighted_v class MultiHeadAttention(nn.Module): def __init__(self, d_model, n_heads): super(MultiHeadAttention, self).__init__() self.n_heads = n_heads assert d_model % n_heads == 0 self.d_head = d_model // n_heads self.dot_product_attention_layer = ScaledDotProductAttention(self. 
d_head) self.W_0 = nn.Linear(d_model, d_model) def _split_into_heads(self, q, k, v): q = q.view(q.size(0), q.size(1), self.n_heads, self.d_head) k = k.view(k.size(0), k.size(1), self.n_heads, self.d_head) v = v.view(v.size(0), v.size(1), self.n_heads, self.d_head) q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) return q, k, v def _concatenate_heads(self, attention_output): attention_output = attention_output.transpose(1, 2).contiguous() attention_output = attention_output.view(attention_output.size(0), attention_output.size(1), -1) return attention_output def forward(self, q, k, v, mask=None): q, k, v = self._split_into_heads(q, k, v) attention_output = self.dot_product_attention_layer(q, k, v, mask) attention_output = self._concatenate_heads(attention_output) attention_output = self.W_0(attention_output) return attention_output class PositionWiseFeedForwardNet(nn.Module): def __init__(self, d_model, d_ff): super(PositionWiseFeedForwardNet, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.dropout = nn.Dropout(0.1) def forward(self, x): return self.w_2(self.dropout(torch.relu(self.w_1(x)))) class TransformerEncoderBlockNew(nn.Module): def __init__(self, d_model, n_heads, d_ff, dropout_proba): super(TransformerEncoderBlockNew, self).__init__() self.W_q = nn.Linear(d_model, d_model) self.W_k = nn.Linear(d_model, d_model) self.W_v = nn.Linear(d_model, d_model) self.mha_layer = MultiHeadAttention(d_model, n_heads) self.dropout_layer_1 = nn.Dropout(dropout_proba) self.add_and_norm_layer_1 = AddAndNorm(d_model) self.ffn_layer = PositionWiseFeedForwardNet(d_model, d_ff) self.dropout_layer_2 = nn.Dropout(dropout_proba) self.add_and_norm_layer_2 = AddAndNorm(d_model) def forward(self, input_0, input_1): primals_1 = self.W_q.weight primals_2 = self.W_q.bias primals_4 = self.W_k.weight primals_5 = self.W_k.bias primals_6 = self.W_v.weight primals_7 = self.W_v.bias primals_9 = self.mha_layer.W_0.weight primals_10 = self.mha_layer.W_0.bias primals_11 = self.add_and_norm_layer_1.layer_norm.weight primals_12 = self.add_and_norm_layer_1.layer_norm.bias primals_13 = self.ffn_layer.w_1.weight primals_14 = self.ffn_layer.w_1.bias primals_15 = self.ffn_layer.w_2.weight primals_16 = self.ffn_layer.w_2.bias primals_17 = self.add_and_norm_layer_2.layer_norm.weight primals_18 = self.add_and_norm_layer_2.layer_norm.bias primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return output[0]
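A hedged parity-check sketch: with dropout disabled, the Inductor call() above should agree with the eager TransformerEncoderBlock from the previous listing (assumed importable here) once the weights are copied over; check_parity and the tolerances are mine, not from the source.

import torch

def check_parity():
    eager = TransformerEncoderBlock(4, 4, 4, 0.5).cuda().eval()
    fused = TransformerEncoderBlockNew(4, 4, 4, 0.5).cuda().eval()
    fused.load_state_dict(eager.state_dict())  # submodule names match one-to-one
    x = torch.rand(4, 4, 4, device='cuda')
    mask = torch.rand(4, 4, 4, device='cuda')  # rarely exactly 0, so little masking
    torch.testing.assert_close(fused(x, mask), eager(x, mask), rtol=1e-4, atol=1e-5)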
francismontalbo/attention-is-all-you-need-paper
TransformerEncoderBlock
false
15409
[ "MIT" ]
167
21ba3e48917da0c6808126d183bece6a9969cfd2
https://github.com/francismontalbo/attention-is-all-you-need-paper/tree/21ba3e48917da0c6808126d183bece6a9969cfd2
Attn
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/uh/cuhd4sxp2wb4hmkwkbfnedhvqmhnyxpm2ir7x5hym6xzu6dmmawn.py # Topologically Sorted Source Nodes: [hiddens], Original ATen: [aten.repeat] # Source node to ATen node mapping: # hiddens => repeat # Graph fragment: # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%unsqueeze, [1, 4, 1]), kwargs = {}) triton_poi_fused_repeat_0 = async_compile.triton('triton_poi_fused_repeat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/ts/ctscnzvbagjv4t25zui245b3recij5udu7nvujnr5rixcyo7elc6.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [energy], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_3 del primals_4 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [hiddens], Original ATen: [aten.repeat] stream0 = get_raw_stream(0) triton_poi_fused_repeat_0.run(primals_2, buf1, 64, grid=grid(64), stream=stream0) del primals_2 buf2 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [energy_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 1), 0), out=buf2) del buf0 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0) buf4 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0); del buf2 # reuse # Topologically Sorted Source 
Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf3, buf4, 16, grid=grid(16), stream=stream0) del buf3 return (reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf4, reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class Attn(nn.Module): """ The score function for the attention mechanism. We define the score function as the general function from Luong et al., where score(s_{i}, h_{j}) = s_{i} * W * h_{j}. """ def __init__(self, hidden_size): super(Attn, self).__init__() self.hidden_size = hidden_size self.attn = nn.Linear(self.hidden_size, self.hidden_size) def forward(self, hidden, encoder_outputs): _batch_size, seq_len, _hidden_size = encoder_outputs.size() hidden = hidden.unsqueeze(1) hiddens = hidden.repeat(1, seq_len, 1) attn_energies = self.score(hiddens, encoder_outputs) return F.softmax(attn_energies, dim=1).unsqueeze(1) def score(self, hidden, encoder_outputs): energy = self.attn(encoder_outputs) hidden = hidden.unsqueeze(2) energy = energy.unsqueeze(3) energy = torch.matmul(hidden, energy) return energy.squeeze(3).squeeze(2) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_3 del primals_4 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_repeat_0[grid(64)](primals_2, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (16, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 1), 0), out=buf2) del buf0 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = 
reinterpret_tensor(buf2, (4, 4), (4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf3 return reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf4, reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 4), 0) class AttnNew(nn.Module): """ The score function for the attention mechanism. We define the score function as the general function from Luong et al., where score(s_{i}, h_{j}) = s_{i} * W * h_{j}. """ def __init__(self, hidden_size): super(AttnNew, self).__init__() self.hidden_size = hidden_size self.attn = nn.Linear(self.hidden_size, self.hidden_size) def score(self, hidden, encoder_outputs): energy = self.attn(encoder_outputs) hidden = hidden.unsqueeze(2) energy = energy.unsqueeze(3) energy = torch.matmul(hidden, energy) return energy.squeeze(3).squeeze(2) def forward(self, input_0, input_1): primals_3 = self.attn.weight primals_4 = self.attn.bias primals_2 = input_0 primals_1 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
gau820827/AI-writer_Data2Doc
Attn
false
15410
[ "Apache-2.0" ]
77
6be0ee6238158a47aa0fdfa8a34df2a47714835a
https://github.com/gau820827/AI-writer_Data2Doc/tree/6be0ee6238158a47aa0fdfa8a34df2a47714835a
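A record like the one above pairs the eager module (Attn) with its compiled wrapper (AttnNew), so the two can be checked against each other on the tensors from get_inputs(). The sketch below is not part of the record; it assumes a CUDA device and that Attn, AttnNew, and get_inputs from this record are in scope, and it copies one set of weights into both modules so any divergence points at the wrapper rather than at initialization.

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    eager = Attn(hidden_size=4).cuda()            # reference implementation
    compiled = AttnNew(hidden_size=4).cuda()      # Inductor-backed wrapper
    compiled.load_state_dict(eager.state_dict())  # share one set of weights
    hidden, encoder_outputs = [t.cuda() for t in get_inputs()]
    ref = eager(hidden, encoder_outputs)          # (B, 1, T) attention weights
    out = compiled(hidden, encoder_outputs)
    assert torch.allclose(ref, out, atol=1e-5)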
AP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3v/c3vbbnaoh2ala54xhjzwr7f44xb5tmg7hvdni6ytelrhdlekfg4j.py # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # att_logits_1 => add # softmax => amax, exp, sub, sum_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_8, %squeeze), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_add_1 = async_compile.triton('triton_poi_fused__softmax_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 
+ (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + (x2), tmp14, xmask) tl.store(out_ptr1 + (x2), tmp25, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/22/c22avtz547ccq6m4s32gquk4cdjjs4eaicgqenyimouechn25s2u.py # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # mul => mul # utter_rep => sum_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mul_sum_2 = async_compile.triton('triton_poi_fused_mul_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 64 x3 = (xindex // 64) x5 = (xindex // 4) % 16 x2 = (xindex // 16) % 4 x7 = xindex tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + 
(16 + x5), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr2 + (32 + x5), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr2 + (48 + x5), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tl.store(out_ptr0 + (x7), tmp42, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1, ), (1, )) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [feature_BxTxH], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf2, primals_5, buf8, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 
4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] triton_poi_fused__softmax_add_1.run(primals_8, buf4, buf5, buf6, 64, grid=grid(64), stream=stream0) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] triton_poi_fused_mul_sum_2.run(buf0, primals_8, buf4, buf5, buf6, buf7, 256, grid=grid(256), stream=stream0) del buf5 del buf6 return (buf7, primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf4, primals_6, buf8, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class AttentivePooling(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, **kwargs): super(AttentivePooling, self).__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) self.act_fn = nn.ReLU() self.softmax = nn.functional.softmax def forward(self, batch_rep, att_mask): """ input: batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension attention_weight: att_w : size (B, T, 1) return: utter_rep: size (B, H) """ att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1) att_logits = att_mask + att_logits att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1) utter_rep = torch.sum(batch_rep * att_w, dim=1) return utter_rep, att_w class AP(nn.Module): """ Attentive Pooling module incorporating an attention mask """ def __init__(self, out_dim, input_dim): super(AP, self).__init__() self.linear = nn.Linear(input_dim, out_dim) self.sap_layer = AttentivePooling(out_dim) self.act_fn = nn.ReLU() def forward(self, feature_BxTxH, att_mask_BxT): """ Arguments feature_BxTxH - [BxTxH] Acoustic feature with shape [B, T, H] att_mask_BxT - [BxT] Attention Mask logits """ feature_BxTxH = self.linear(feature_BxTxH) sap_vec, _ = self.sap_layer(feature_BxTxH, att_mask_BxT) return sap_vec def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'out_dim': 4, 'input_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) @triton.jit def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 64 x3 = xindex // 64 x5 = xindex // 4 % 16 x2 = xindex // 16 % 4 x7 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr4 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr2 + (16 + x5), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + 16 * x3), xmask, 
eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr2 + (32 + x5), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp34 = tl.load(in_ptr2 + (48 + x5), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tl.store(out_ptr0 + x7, tmp42, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf1) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf2, primals_5, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_1[grid(64)](primals_8, buf4, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sum_2[grid(256)](buf0, primals_8, buf4, buf5, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf5 del buf6 return buf7, primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf2, (64, 4), (4, 1), 0 ), buf4, 
primals_6, buf8, primals_4 class AttentivePooling(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, **kwargs): super(AttentivePooling, self).__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) self.act_fn = nn.ReLU() self.softmax = nn.functional.softmax def forward(self, batch_rep, att_mask): """ input: batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension attention_weight: att_w : size (B, T, 1) return: utter_rep: size (B, H) """ att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1) att_logits = att_mask + att_logits att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1) utter_rep = torch.sum(batch_rep * att_w, dim=1) return utter_rep, att_w class APNew(nn.Module): """ Attentive Pooling module incorporating an attention mask """ def __init__(self, out_dim, input_dim): super(APNew, self).__init__() self.linear = nn.Linear(input_dim, out_dim) self.sap_layer = AttentivePooling(out_dim) self.act_fn = nn.ReLU() def forward(self, input_0, input_1): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_4 = self.sap_layer.W_a.weight primals_5 = self.sap_layer.W_a.bias primals_6 = self.sap_layer.W.weight primals_7 = self.sap_layer.W.bias primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
gcambara/s3prl
AP
false
15411
[ "MIT" ]
856
33284ebde3a903ed8604d6dae85669d0174ae1d3
https://github.com/gcambara/s3prl/tree/33284ebde3a903ed8604d6dae85669d0174ae1d3
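In the AP record above, triton_poi_fused__softmax_add_1 produces the per-row max and exp-sum of att_mask + att_logits (the statistics of a numerically stable softmax), and triton_poi_fused_mul_sum_2 recomputes the normalized weights from those statistics while reducing over the time axis in the same pass. The following is a minimal eager sketch of that arithmetic, not code from the record; the tensor names are illustrative.

import torch

def masked_attentive_pool(feature, att_logits, att_mask):
    # Stable softmax over masked logits: subtract the row max before
    # exponentiating (kernel 1's amax), then normalize by the row sum
    # (kernel 1's exp-sum, applied as a division in kernel 2).
    z = att_mask + att_logits
    z = z - z.max(dim=-1, keepdim=True).values
    w = z.exp()
    w = w / w.sum(dim=-1, keepdim=True)
    # Weighted reduction over time, which kernel 2 fuses with the division.
    return (feature * w.unsqueeze(-1)).sum(dim=1)

B, T, H = 4, 4, 4
feature = torch.randn(B, T, H)
att_logits = torch.randn(B, T)
att_mask = torch.zeros(B, T)  # additive mask; zeros keep every frame
pooled = masked_attentive_pool(feature, att_logits, att_mask)
assert pooled.shape == (B, H)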
AttentivePoolingModule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/uu/cuuixbc7clu3x4xnld3clwlzm4bwe3sea4shtlguimyhwbrzdnjg.py # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # att_logits_1 => add # softmax => amax, exp, sub, sum_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_6, %squeeze), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_add_1 = async_compile.triton('triton_poi_fused__softmax_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp16 = 
tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp8 = tmp7 + tmp3 tmp9 = tmp6 + tmp8 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tmp12 + tmp3 tmp14 = tmp11 + tmp13 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tmp17 + tmp3 tmp19 = tmp16 + tmp18 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + (x2), tmp20, xmask) tl.store(out_ptr1 + (x2), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/26/c26r27cnuaxawyutnmstc5xa4iiskv2zoymvaaom3awrmuaav47p.py # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # att_logits_1 => add # softmax => div, exp, sub # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_6, %squeeze), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_add_2 = async_compile.triton('triton_poi_fused__softmax_add_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x5 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = 
tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr3 + (x5), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + (x5), xmask, eviction_policy='evict_last') tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 - tmp6 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/2q/c2qalxfq2yxll6mbnxojhbih4eltcgaqrxucbiibajluczsoe5lz.py # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # mul => mul # utter_rep => sum_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %unsqueeze), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 64 x1 = (xindex // 4) % 16 x2 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (16 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (32 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (48 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + (x4), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, 
primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1, ), (1, )) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] triton_poi_fused__softmax_add_1.run(primals_6, buf2, primals_5, buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] triton_poi_fused__softmax_add_2.run(primals_6, buf2, primals_5, buf3, buf4, buf5, 256, grid=grid(256), stream=stream0) del buf2 del buf3 del buf4 del primals_5 del primals_6 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] triton_poi_fused_mul_sum_3.run(primals_3, buf5, buf6, 256, grid=grid(256), stream=stream0) return (buf6, reinterpret_tensor(buf5, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0), primals_3, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf5, primals_4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class AttentivePoolingModule(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, activation='ReLU', **kwargs): super(AttentivePoolingModule, self).__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) self.act_fn = getattr(nn, activation)() self.softmax = nn.functional.softmax def forward(self, batch_rep, att_mask): """ input: batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension attention_weight: att_w : size (B, T, 1) return: utter_rep: size (B, H) """ att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1) att_logits = att_mask + att_logits att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1) utter_rep = torch.sum(batch_rep * att_w, dim=1) return utter_rep, att_w def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp8 = tmp7 + tmp3 tmp9 = tmp6 + tmp8 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tmp12 + tmp3 tmp14 = tmp11 + tmp13 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tmp17 + tmp3 tmp19 = tmp16 + tmp18 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + x2, tmp20, xmask) tl.store(out_ptr1 + x2, tmp31, xmask) @triton.jit def triton_poi_fused__softmax_add_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x5 = xindex // 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + x5, xmask, eviction_policy='evict_last') tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 - tmp6 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: 
tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 64 x1 = xindex // 4 % 16 x2 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (16 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (32 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (48 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x4, tmp14, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_1[grid(64)](primals_6, buf2, primals_5, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_add_2[grid(256)](primals_6, buf2, primals_5, buf3, buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 del buf4 del primals_5 del primals_6 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sum_3[grid(256)](primals_3, buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf6, reinterpret_tensor(buf5, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0 ), primals_3, reinterpret_tensor(buf1, (64, 4), (4, 1), 0 ), buf5, primals_4, buf7 class AttentivePoolingModuleNew(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, activation='ReLU', **kwargs): super(AttentivePoolingModuleNew, self).__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) self.act_fn = getattr(nn, activation)() self.softmax = nn.functional.softmax def forward(self, input_0, input_1): primals_1 = self.W_a.weight primals_2 = self.W_a.bias primals_4 = self.W.weight primals_5 = self.W.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, 
primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1]
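A minimal smoke-test sketch for the compiled module above (a hypothetical driver, not part of the dataset entry or the upstream repo). It assumes a CUDA device and the (4, 4, 4, 4) input shapes the Triton kernels were specialized to at trace time; other shapes would trip the assert_size_stride guards inside call().

# Hypothetical smoke test; inputs must match the traced (4, 4, 4, 4) case.
import torch

if torch.cuda.is_available():
    m = AttentivePoolingModuleNew(input_dim=4).cuda()
    feats = torch.rand(4, 4, 4, 4, device='cuda')
    mask = torch.zeros(4, 4, 4, 4, device='cuda')
    utter_rep, att_w = m(feats, mask)  # pooled features and attention weights
    print(utter_rep.shape, att_w.shape)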
gcambara/s3prl
AttentivePoolingModule
false
15,412
[ "MIT" ]
856
33284ebde3a903ed8604d6dae85669d0174ae1d3
https://github.com/gcambara/s3prl/tree/33284ebde3a903ed8604d6dae85669d0174ae1d3
ASP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3v/c3vbbnaoh2ala54xhjzwr7f44xb5tmg7hvdni6ytelrhdlekfg4j.py # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # att_logits_1 => add # softmax => amax, exp, sub, sum_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_8, %squeeze), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_add_1 = async_compile.triton('triton_poi_fused__softmax_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 
+ (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + (x2), tmp14, xmask) tl.store(out_ptr1 + (x2), tmp25, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ec/cecsiupxfmmb3m5bbivf5bciiq2k5ishu3qab2b2u6oieray7t5b.py # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %view_1), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = (xindex // 4) x5 = (xindex // 4) % 64 x7 = (xindex // 16) x8 = xindex % 256 x9 = xindex tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x7), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x7), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr4 + (x8), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + (x9), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/pk/cpky4ikacz2hgolxoehy6foq57gecnbejjobg6du4jubou3tbafn.py # Topologically Sorted Source Nodes: [mul, utter_rep, mul_2, sum_2, pow_1, sub, add_1, variance], Original 
ATen: [aten.mul, aten.sum, aten.pow, aten.sub, aten.add, aten.sqrt] # Source node to ATen node mapping: # add_1 => add_1 # mul => mul # mul_2 => mul_2 # pow_1 => pow_1 # sub => sub_1 # sum_2 => sum_3 # utter_rep => sum_2 # variance => sqrt # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze), kwargs = {}) # %sum_2 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %view_1), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [1]), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 2), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_3, %pow_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, 1e-08), kwargs = {}) # %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%sqrt, 2), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 1.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%pow_2, 2.0), kwargs = {}) triton_poi_fused_add_mul_pow_sqrt_sub_sum_3 = async_compile.triton('triton_poi_fused_add_mul_pow_sqrt_sub_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 8, 9, 10), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_sqrt_sub_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 24, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_pow_sqrt_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x6 = xindex % 64 x3 = (xindex // 64) x4 = (xindex // 4) % 16 x2 = (xindex // 16) % 4 x0 = xindex % 4 x5 = (xindex // 4) 
x8 = xindex tmp0 = tl.load(in_ptr0 + (x6), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x4 + (64*x3)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (64 + x6), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x4 + (64*x3)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (16 + x4), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (128 + x6), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x4 + (64*x3)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr2 + (32 + x4), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr0 + (192 + x6), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x4 + (64*x3)), xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr2 + (48 + x4), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp43 = tl.load(in_ptr5 + (x6 + (256*x3)), xmask) tmp45 = tl.load(in_ptr5 + (64 + x6 + (256*x3)), xmask) tmp48 = tl.load(in_ptr5 + (128 + x6 + (256*x3)), xmask) tmp51 = tl.load(in_ptr5 + (192 + x6 + (256*x3)), xmask) tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tmp44 = tmp43 * tmp0 tmp46 = tmp45 * tmp10 tmp47 = tmp44 + tmp46 tmp49 = tmp48 * tmp21 tmp50 = tmp47 + tmp49 tmp52 = tmp51 * tmp32 tmp53 = tmp50 + tmp52 tmp54 = tmp42 * tmp42 tmp55 = tmp53 - tmp54 tmp56 = 1e-08 tmp57 = tmp55 + tmp56 tmp58 = libdevice.sqrt(tmp57) tmp59 = 2.0 tmp60 = tmp58 * tmp59 tmp61 = tmp42 * tmp59 tl.store(out_ptr0 + (x0 + (8*x5)), tmp42, xmask) tl.store(out_ptr2 + (x0 + (8*x5)), tmp58, xmask) tl.store(out_ptr3 + (x8), tmp60, xmask) tl.store(out_ptr4 + (x8), tmp61, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1, ), (1, )) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: 
[feature_BxTxH], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf2, primals_5, buf14, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] triton_poi_fused__softmax_add_1.run(primals_8, buf4, buf5, buf6, 64, grid=grid(64), stream=stream0) buf8 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(primals_8, buf4, buf5, buf6, buf0, buf8, 1024, grid=grid(1024), stream=stream0) buf11 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) buf7 = reinterpret_tensor(buf11, (4, 4, 4, 4), (128, 32, 8, 1), 0) # alias buf10 = reinterpret_tensor(buf11, (4, 4, 4, 4), (128, 32, 8, 1), 4) # alias buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, utter_rep, mul_2, sum_2, pow_1, sub, add_1, variance], Original ATen: [aten.mul, aten.sum, aten.pow, aten.sub, aten.add, aten.sqrt] triton_poi_fused_add_mul_pow_sqrt_sub_sum_3.run(buf0, primals_8, buf4, buf5, buf6, buf8, buf7, buf10, buf12, buf13, 256, grid=grid(256), stream=stream0) del buf5 del buf6 return (buf11, primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf4, buf8, buf12, buf13, primals_6, buf14, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) 
return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class AttentivePooling(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, **kwargs): super(AttentivePooling, self).__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) self.act_fn = nn.ReLU() self.softmax = nn.functional.softmax def forward(self, batch_rep, att_mask): """ input: batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension attention_weight: att_w : size (B, T, 1) return: utter_rep: size (B, H) """ att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1) att_logits = att_mask + att_logits att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1) utter_rep = torch.sum(batch_rep * att_w, dim=1) return utter_rep, att_w class ASP(nn.Module): """ Attentive Statistic Pooling module incorporating attention mask""" def __init__(self, out_dim, input_dim): super(ASP, self).__init__() self.linear = nn.Linear(input_dim, out_dim) self.ap_layer = AttentivePooling(out_dim) def forward(self, feature_BxTxH, att_mask_BxT): """ Arguments feature_BxTxH - [BxTxH] Acoustic feature with shape att_mask_BxT - [BxT] Attention Mask logits """ feature_BxTxH = self.linear(feature_BxTxH) sap_vec, att_w = self.ap_layer(feature_BxTxH, att_mask_BxT) variance = torch.sqrt(torch.sum(att_w * feature_BxTxH * feature_BxTxH, dim=1) - sap_vec ** 2 + 1e-08) statistic_pooling = torch.cat([sap_vec, variance], dim=-1) return statistic_pooling def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'out_dim': 4, 'input_dim': 4}]
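For orientation, a minimal eager-mode sketch (hypothetical) driving the ASP module with its bundled get_inputs/get_init_inputs helpers. The statistics-pooled output concatenates the attention-weighted mean with its standard deviation (named variance in the code), so the trailing dimension doubles to 2 * out_dim:

# Hypothetical usage of the eager ASP module defined above (CPU is fine).
import torch

model = ASP(out_dim=4, input_dim=4)
feature, att_mask = get_inputs()   # two (4, 4, 4, 4) sample tensors
pooled = model(feature, att_mask)
print(pooled.shape)                # trailing dim is 8 == 2 * out_dim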
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex // 4 x5 = xindex // 4 % 64 x7 = xindex // 16 x8 = xindex % 256 x9 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x7, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x7, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr4 + x8, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + x9, tmp9, xmask) @triton.jit def triton_poi_fused_add_mul_pow_sqrt_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK: tl.constexpr): xnumel = 
256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x6 = xindex % 64 x3 = xindex // 64 x4 = xindex // 4 % 16 x2 = xindex // 16 % 4 x0 = xindex % 4 x5 = xindex // 4 x8 = xindex tmp0 = tl.load(in_ptr0 + x6, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x4 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr4 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (64 + x6), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x4 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr2 + (16 + x4), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (128 + x6), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x4 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr2 + (32 + x4), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr0 + (192 + x6), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x4 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp34 = tl.load(in_ptr2 + (48 + x4), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp43 = tl.load(in_ptr5 + (x6 + 256 * x3), xmask) tmp45 = tl.load(in_ptr5 + (64 + x6 + 256 * x3), xmask) tmp48 = tl.load(in_ptr5 + (128 + x6 + 256 * x3), xmask) tmp51 = tl.load(in_ptr5 + (192 + x6 + 256 * x3), xmask) tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tmp44 = tmp43 * tmp0 tmp46 = tmp45 * tmp10 tmp47 = tmp44 + tmp46 tmp49 = tmp48 * tmp21 tmp50 = tmp47 + tmp49 tmp52 = tmp51 * tmp32 tmp53 = tmp50 + tmp52 tmp54 = tmp42 * tmp42 tmp55 = tmp53 - tmp54 tmp56 = 1e-08 tmp57 = tmp55 + tmp56 tmp58 = libdevice.sqrt(tmp57) tmp59 = 2.0 tmp60 = tmp58 * tmp59 tmp61 = tmp42 * tmp59 tl.store(out_ptr0 + (x0 + 8 * x5), tmp42, xmask) tl.store(out_ptr2 + (x0 + 8 * x5), tmp58, xmask) tl.store(out_ptr3 + x8, tmp60, xmask) tl.store(out_ptr4 + x8, tmp61, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf1) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf2, primals_5, buf14, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_1[grid(64)](primals_8, buf4, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_mul_2[grid(1024)](primals_8, buf4, buf5, buf6, buf0, buf8, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf11 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32 ) buf7 = reinterpret_tensor(buf11, (4, 4, 4, 4), (128, 32, 8, 1), 0) buf10 = reinterpret_tensor(buf11, (4, 4, 4, 4), (128, 32, 8, 1), 4) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_pow_sqrt_sub_sum_3[grid(256)](buf0, primals_8, buf4, buf5, buf6, buf8, buf7, buf10, buf12, buf13, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del buf6 return buf11, primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf2, (64, 4), (4, 1), 0 ), buf4, buf8, buf12, buf13, primals_6, buf14, primals_4 class AttentivePooling(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, **kwargs): super(AttentivePooling, self).__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) self.act_fn = nn.ReLU() self.softmax = nn.functional.softmax def forward(self, batch_rep, att_mask): """ input: batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension attention_weight: att_w : size (B, T, 1) return: utter_rep: size (B, H) """ att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1) att_logits = att_mask + att_logits att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1) utter_rep = torch.sum(batch_rep * att_w, dim=1) return utter_rep, att_w class ASPNew(nn.Module): """ Attentive Statistic Pooling module incorporating attention mask""" def __init__(self, out_dim, input_dim): super(ASPNew, self).__init__() self.linear = nn.Linear(input_dim, out_dim) self.ap_layer = AttentivePooling(out_dim) def forward(self, input_0, input_1): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_4 = self.ap_layer.W_a.weight primals_5 = self.ap_layer.W_a.bias primals_6 = self.ap_layer.W.weight primals_7 = self.ap_layer.W.bias primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
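A hedged cross-check sketch (hypothetical, CUDA-only because call() allocates CUDA buffers, and assuming the eager ASP and get_inputs from the reference implementation above are in scope): after copying the eager parameters into ASPNew, the two paths should agree to float tolerance on the traced input shapes.

# Hypothetical numeric parity check between ASP (eager) and ASPNew (compiled).
import torch

if torch.cuda.is_available():
    eager = ASP(out_dim=4, input_dim=4).cuda()
    fast = ASPNew(out_dim=4, input_dim=4).cuda()
    fast.load_state_dict(eager.state_dict())  # identical parameters
    x, mask = [t.cuda() for t in get_inputs()]
    assert torch.allclose(eager(x, mask), fast(x, mask), atol=1e-5)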
gcambara/s3prl
ASP
false
15,413
[ "MIT" ]
856
33284ebde3a903ed8604d6dae85669d0174ae1d3
https://github.com/gcambara/s3prl/tree/33284ebde3a903ed8604d6dae85669d0174ae1d3
RegLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/3l/c3lzkatm5eqqfbbtcqxxw7oujhlvnqrxq6gcyetj3kemmpsltiiy.py # Topologically Sorted Source Nodes: [reg_loss, norm_1, reg_loss_1, norm_2, reg_loss_2, norm_3, reg_loss_3], Original ATen: [aten.linalg_vector_norm, aten.add] # Source node to ATen node mapping: # norm_1 => pow_3, pow_4, sum_2 # norm_2 => pow_5, pow_6, sum_3 # norm_3 => pow_7, pow_8, sum_4 # reg_loss => pow_1, pow_2, sum_1 # reg_loss_1 => add # reg_loss_2 => add_1 # reg_loss_3 => add_2 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, None), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, %pow_4), kwargs = {}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_2, 2), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, None), kwargs = {}) # %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 0.5), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %pow_6), kwargs = {}) # %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_3, 2), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_7, None), kwargs = {}) # %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_4, 0.5), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %pow_8), kwargs = {}) 
triton_per_fused_add_linalg_vector_norm_0 = async_compile.triton('triton_per_fused_add_linalg_vector_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_linalg_vector_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_linalg_vector_norm_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp5 = tl.load(in_ptr0 + (64 + r0), None) tmp10 = tl.load(in_ptr0 + (128 + r0), None) tmp15 = tl.load(in_ptr0 + (192 + r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.sum(tmp12, 1)[:, None] tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.sum(tmp17, 1)[:, None] tmp20 = libdevice.sqrt(tmp4) tmp21 = libdevice.sqrt(tmp9) tmp22 = tmp20 + tmp21 tmp23 = libdevice.sqrt(tmp14) tmp24 = tmp22 + tmp23 tmp25 = libdevice.sqrt(tmp19) tmp26 = tmp24 + tmp25 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp26, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf4 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [reg_loss, norm_1, reg_loss_1, norm_2, reg_loss_2, norm_3, reg_loss_3], Original ATen: [aten.linalg_vector_norm, aten.add] stream0 = get_raw_stream(0) triton_per_fused_add_linalg_vector_norm_0.run(buf4, arg0_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from 
torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class RegLoss(nn.Module): """ RegLoss, L2 regularization on model parameters """ def __init__(self): super(RegLoss, self).__init__() def forward(self, parameters): reg_loss = None for W in parameters: if reg_loss is None: reg_loss = W.norm(2) else: reg_loss = reg_loss + W.norm(2) return reg_loss def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
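A small illustrative sketch (hypothetical) of the intended use of RegLoss: summing the 2-norms of a model's parameters as an explicit weight-decay-style term in the training objective.

# Hypothetical usage: L2-regularize a toy model's parameters with RegLoss.
import torch
import torch.nn as nn

toy = nn.Linear(4, 2)
penalty = RegLoss()(toy.parameters())  # sum of per-parameter 2-norms
task_loss = toy(torch.rand(3, 4)).pow(2).mean()
total = task_loss + 1e-4 * penalty     # regularized objective
total.backward()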
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_linalg_vector_norm_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr0 + (64 + r0), None) tmp10 = tl.load(in_ptr0 + (128 + r0), None) tmp15 = tl.load(in_ptr0 + (192 + r0), None) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.sum(tmp12, 1)[:, None] tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.sum(tmp17, 1)[:, None] tmp20 = libdevice.sqrt(tmp4) tmp21 = libdevice.sqrt(tmp9) tmp22 = tmp20 + tmp21 tmp23 = libdevice.sqrt(tmp14) tmp24 = tmp22 + tmp23 tmp25 = libdevice.sqrt(tmp19) tmp26 = tmp24 + tmp25 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf4 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_linalg_vector_norm_0[grid(1)](buf4, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf4, class RegLossNew(nn.Module): """ RegLoss, L2 regularization on model parameters """ def __init__(self): super(RegLossNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
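Worth noting, as a hedged observation: the fused kernel above hard-codes the traced case, a single (4, 4, 4, 4) CUDA tensor whose four 64-element slices along dim 0 stand in for four separate parameters (tracing unrolled the Python for-loop), so RegLossNew matches RegLoss only for exactly that input. A hypothetical parity check, assuming the eager RegLoss above is in scope:

# Hypothetical parity check; only valid for the traced (4, 4, 4, 4) shape.
import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(RegLoss()(x), RegLossNew()(x), atol=1e-5)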
geekinglcq/HRec
RegLoss
false
15,414
[ "MIT" ]
49
b3a67f7721e6e73a7af37d308b5b00e9df68d495
https://github.com/geekinglcq/HRec/tree/b3a67f7721e6e73a7af37d308b5b00e9df68d495
SelfAttentionPooling
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/rg/crg522m3y4v7k4jllgwpydciu6bjqsfnsxrer5whyf4hotsoe5rw.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze, [0], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = 
xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/h7/ch7ziltjnllhlwal6dz2n67p6gl5e2gojxkzuefleah4glcy25od.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/uu/cuuihy3fs6zmxgj73f52xxftscmhwmh6ubigwdpfc7qqdm3hgdhp.py # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # mul => mul # utter_rep => sum_2 # Graph fragment: # %mul : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %unsqueeze), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mul_sum_2 = async_compile.triton('triton_poi_fused_mul_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 16) x3 = xindex % 16 x1 = (xindex // 4) % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x3 + (64*x2)), xmask) tmp1 = tl.load(in_ptr1 + (x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x3 + (64*x2)), xmask) tmp4 = tl.load(in_ptr1 + (4 + x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (32 + x3 + (64*x2)), xmask) tmp8 = tl.load(in_ptr1 + (8 + x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x3 + (64*x2)), xmask) tmp12 = tl.load(in_ptr1 + (12 + x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + (x4), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_1 del primals_2 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) 
triton_poi_fused__softmax_0.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] triton_poi_fused_mul_sum_2.run(primals_3, buf3, buf4, 64, grid=grid(64), stream=stream0) del buf3 return (buf4, primals_3, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class SelfAttentionPooling(nn.Module): """ Implementation of SelfAttentionPooling Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition https://arxiv.org/pdf/2008.01077v1.pdf """ def __init__(self, input_dim): super(SelfAttentionPooling, self).__init__() self.W = nn.Linear(input_dim, 1) def forward(self, batch_rep): """ input: batch_rep : size (N, T, H), N: batch size, T: sequence length, H: Hidden dimension attention_weight: att_w : size (N, T, 1) return: utter_rep: size (N, H) """ softmax = nn.functional.softmax att_w = softmax(self.W(batch_rep).squeeze(-1)).unsqueeze(-1) utter_rep = torch.sum(batch_rep * att_w, dim=1) return utter_rep def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4}]
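A brief eager-mode sketch (hypothetical). One subtlety grounded in the graph fragments above: the softmax call omits dim, so PyTorch falls back to implicit dimension selection, which resolves to dim 0 for the 3D logits produced by this 4D sample input (recent releases also emit a deprecation warning); the Inductor graph confirms this with amax over [0].

# Hypothetical usage of SelfAttentionPooling on the 4D sample input.
import torch

pool = SelfAttentionPooling(input_dim=4)
x = torch.rand(4, 4, 4, 4)
print(pool(x).shape)  # attention weights applied, then summed over dim 1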
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex % 16 x1 = xindex // 4 % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask) tmp1 = tl.load(in_ptr1 + (x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask) tmp4 = tl.load(in_ptr1 + (4 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask) tmp8 = tl.load(in_ptr1 + (8 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask) tmp12 = tl.load(in_ptr1 + (12 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x4, tmp14, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_1 del primals_2 buf2 = 
empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = buf2 del buf2 triton_poi_fused_mul_sum_2[grid(64)](primals_3, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf3 return buf4, primals_3, buf1 class SelfAttentionPoolingNew(nn.Module): """ Implementation of SelfAttentionPooling Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition https://arxiv.org/pdf/2008.01077v1.pdf """ def __init__(self, input_dim): super(SelfAttentionPoolingNew, self).__init__() self.W = nn.Linear(input_dim, 1) def forward(self, input_0): primals_1 = self.W.weight primals_2 = self.W.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
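A hedged parity check for the pair above (a sketch assuming a CUDA device, since call() allocates CUDA buffers): with shared weights, the compiled wrapper should match the eager module to float tolerance.

import torch

torch.manual_seed(0)
eager = SelfAttentionPooling(input_dim=4).cuda()
fused = SelfAttentionPoolingNew(input_dim=4).cuda()
fused.load_state_dict(eager.state_dict())  # share W.weight / W.bias

x = torch.rand(4, 4, 4, 4, device='cuda')  # the shape get_inputs() produces
assert torch.allclose(eager(x), fused(x), atol=1e-5)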
gcambara/s3prl
SelfAttentionPooling
false
15415
[ "MIT" ]
856
33284ebde3a903ed8604d6dae85669d0174ae1d3
https://github.com/gcambara/s3prl/tree/33284ebde3a903ed8604d6dae85669d0174ae1d3
SoftmaxLoss
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # loss => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = 
tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/t2/ct2dbabladhyyceg2gmfqrslgo4edv7x6gs7iscumud7suileuje.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div] # Source node to ATen node mapping: # loss => div, exp, log, mul, neg, sub_1, sum_1, sum_2 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %primals_4), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 64), kwargs = {}) triton_per_fused__log_softmax_div_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_mul_neg_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK 
xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r3), None) tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (r3), None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [logits_BxSpn], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div] triton_per_fused__log_softmax_div_mul_neg_sum_1.run(buf3, buf1, primals_4, 1, 256, grid=grid(1), stream=stream0) del buf1 return (buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class SoftmaxLoss(nn.Module): def __init__(self, hidden_dim, speaker_num, **kwargs): """ Softmax Loss """ super(SoftmaxLoss, self).__init__() self.fc = nn.Linear(hidden_dim, speaker_num) self.loss = nn.CrossEntropyLoss() def forward(self, x_BxH, labels_B): """ x shape: (B, H) labels shape: (B) """ logits_BxSpn = self.fc(x_BxH) loss = self.loss(logits_BxSpn, labels_B) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_dim': 4, 'speaker_num': 4}]
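Note that get_inputs() feeds a float tensor of the same shape as the logits, so nn.CrossEntropyLoss runs in its soft-target mode (supported since PyTorch 1.10). A sketch of the reduction the fused kernel performs, including the 1/64 factor (0.015625), where 64 = numel / num_classes:

import torch
import torch.nn.functional as F

logits = torch.randn(4, 4, 4, 4)
targets = torch.rand(4, 4, 4, 4)  # float targets, as in get_inputs()

# -(target * log_softmax(logit)) summed over everything, averaged over the
# 4 * 4 * 4 = 64 positions left after reducing the class dim (dim 1).
manual = -(targets * F.log_softmax(logits, dim=1)).sum() / 64
assert torch.allclose(manual, F.cross_entropy(logits, targets), atol=1e-6)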
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + r3, None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf3, buf1, 
primals_4, 1, 256, num_warps=2, num_stages=1) del buf1 return buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0 class SoftmaxLossNew(nn.Module): def __init__(self, hidden_dim, speaker_num, **kwargs): """ Softmax Loss """ super(SoftmaxLossNew, self).__init__() self.fc = nn.Linear(hidden_dim, speaker_num) self.loss = nn.CrossEntropyLoss() def forward(self, input_0, input_1): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
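The two kernels above split log-softmax in the standard numerically stable way: the pointwise kernel computes x - amax(x, dim=1), and the persistent reduction finishes with log(sum(exp(.))) fused into the NLL sum. A plain-torch sketch of the same decomposition:

import torch

x = torch.randn(4, 4, 4, 4)
shifted = x - x.amax(dim=1, keepdim=True)                           # kernel 0
log_probs = shifted - shifted.exp().sum(dim=1, keepdim=True).log()  # kernel 1
assert torch.allclose(log_probs, torch.log_softmax(x, dim=1), atol=1e-6)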
gcambara/s3prl
SoftmaxLoss
false
15,416
[ "MIT" ]
856
33284ebde3a903ed8604d6dae85669d0174ae1d3
https://github.com/gcambara/s3prl/tree/33284ebde3a903ed8604d6dae85669d0174ae1d3
MLP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/3a/c3ahpcqb72o7gdbgt53cgnedr7p2tje6sfperji7zazze5egjmmz.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten._prelu_kernel] # Source node to ATen node mapping: # x => gt, mul, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %view_1), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {}) triton_poi_fused__prelu_kernel_0 = async_compile.triton('triton_poi_fused__prelu_kernel_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr1 + (0)) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = tmp4 * tmp0 tmp6 = tl.where(tmp2, tmp0, tmp5) tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten._prelu_kernel] stream0 = get_raw_stream(0) triton_poi_fused__prelu_kernel_0.run(buf0, primals_4, buf1, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten._prelu_kernel, aten.native_dropout] buf2 = torch.ops.aten.native_dropout.default(buf1, 0.2, True) buf3 = buf2[0] buf4 = buf2[1] del buf2 buf5 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_6 return (reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf4, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch import torch.nn as nn from torch.nn.modules.module import Module class MLP(Module): """ A Simple two layers MLP to make SGC a bit better. """ def __init__(self, nfeat, nhid, nclass, dp=0.2): super(MLP, self).__init__() self.W1 = nn.Linear(nfeat, nhid) self.W2 = nn.Linear(nhid, nclass) self.dp = dp self.act = nn.PReLU() self.num_class = nclass def forward(self, x): x = self.act(self.W1(x)) x = nn.Dropout(p=self.dp)(x) return self.W2(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4}]
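One behavioral wrinkle in the module above: forward() builds nn.Dropout on the fly, and a freshly constructed module has training=True, so dropout stays active even under model.eval(). That is why the compiled graph hard-codes native_dropout(..., 0.2, True). A quick sketch (my own, not from the repo):

import torch

mlp = MLP(nfeat=4, nhid=4, nclass=4).eval()
x = torch.rand(4, 4, 4, 4)
# Two eval-mode passes still differ, because the ad-hoc nn.Dropout inside
# forward() never sees mlp.eval().
print(torch.equal(mlp(x), mlp(x)))  # almost surely False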
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import torch.nn as nn from torch.nn.modules.module import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__prelu_kernel_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = tmp4 * tmp0 tmp6 = tl.where(tmp2, tmp0, tmp5) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__prelu_kernel_0[grid(256)](buf0, primals_4, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = torch.ops.aten.native_dropout.default(buf1, 0.2, True) buf3 = buf2[0] buf4 = buf2[1] del buf2 buf5 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0) del buf1 extern_kernels.addmm(primals_6, reinterpret_tensor(buf3, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_6 return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, buf4, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_5 class MLPNew(Module): """ A Simple two layers MLP to make SGC a bit better. """ def __init__(self, nfeat, nhid, nclass, dp=0.2): super(MLPNew, self).__init__() self.W1 = nn.Linear(nfeat, nhid) self.W2 = nn.Linear(nhid, nclass) self.dp = dp self.act = nn.PReLU() self.num_class = nclass def forward(self, input_0): primals_1 = self.W1.weight primals_2 = self.W1.bias primals_5 = self.W2.weight primals_6 = self.W2.bias primals_4 = self.act.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
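Not obvious from the wrapper alone: buf4 above is the boolean mask that aten.native_dropout returns next to its output, threaded through the return tuple (presumably for the backward graph, which is not shown here). A small illustration of the op itself:

import torch

x = torch.rand(4, 4, 4, 4)
out, mask = torch.ops.aten.native_dropout.default(x, 0.2, True)
assert torch.allclose(out[mask], x[mask] / 0.8)  # kept values scaled by 1/(1-p)
assert (out[~mask] == 0).all()                   # dropped values zeroed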
gear/gfnn
MLP
false
15417
[ "MIT" ]
46
36667861caacba921469d43917d002896e832c3f
https://github.com/gear/gfnn/tree/36667861caacba921469d43917d002896e832c3f
KGCN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/rp/crpg2xwsfquznkqzvnyazfnl5y4eqbe55cxzbx4lgyyat3i355e3.py # Topologically Sorted Source Nodes: [add, h], Original ATen: [aten.add, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # add => add # h => relu # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_1, %primals_4), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : 
tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [support], Original ATen: [aten.mm] extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = buf1; del buf1 # reuse buf8 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [add, h], Original ATen: [aten.add, aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_add_relu_threshold_backward_0.run(buf2, primals_4, buf8, 16, grid=grid(16), stream=stream0) del primals_4 buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf2, out=buf3) buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf3, out=buf4) buf5 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [h_3], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf4, out=buf5) buf6 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [h_4], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf5, out=buf6) buf7 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, buf6, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_6 return (buf7, buf6, primals_5, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), buf8, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import math import torch import torch.nn.functional as F import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn.modules.module import Module class GraphConvolution(Module): """ Simple GCN layer """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj): support = torch.mm(input, self.weight) output = torch.spmm(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class KGCN(Module): """ A bit more complex GNN to deal with non-convex feature space. """ def __init__(self, nhidden, nfeat, nclass, degree): super(KGCN, self).__init__() self.Wx = GraphConvolution(nfeat, nhidden) self.W = nn.Linear(nhidden, nclass) self.d = degree def forward(self, x, adj): h = F.relu(self.Wx(x, adj)) for i in range(self.d): h = torch.spmm(adj, h) return self.W(h) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'nhidden': 4, 'nfeat': 4, 'nclass': 4, 'degree': 4}]
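As a sanity check of the forward semantics above (a sketch of mine, using dense tensors as get_inputs() does): the model computes h = A^d · ReLU(A X W_x + b), followed by the linear head.

import torch

torch.manual_seed(0)
model = KGCN(nhidden=4, nfeat=4, nclass=4, degree=4)
x, adj = torch.rand(4, 4), torch.rand(4, 4)

h = torch.relu(adj @ (x @ model.Wx.weight) + model.Wx.bias)
for _ in range(model.d):
    h = adj @ h  # degree-many propagation steps
assert torch.allclose(model.W(h), model(x, adj), atol=1e-5)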
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module import math import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn.modules.module import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = buf1 del buf1 buf8 = empty_strided_cuda((4, 4), (4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_relu_threshold_backward_0[grid(16)](buf2, primals_4, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf3 = buf0 del buf0 extern_kernels.mm(primals_3, buf2, out=buf3) buf4 = buf2 del buf2 extern_kernels.mm(primals_3, buf3, out=buf4) buf5 = buf3 del buf3 extern_kernels.mm(primals_3, buf4, out=buf5) buf6 = buf4 del buf4 extern_kernels.mm(primals_3, buf5, out=buf6) buf7 = buf5 del buf5 extern_kernels.addmm(primals_6, buf6, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_6 return buf7, buf6, primals_5, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), buf8, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0) class GraphConvolution(Module): """ Simple GCN layer """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj): support = torch.mm(input, self.weight) output = torch.spmm(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class KGCNNew(Module): 
""" A bit more complex GNN to deal with non-convex feature space. """ def __init__(self, nhidden, nfeat, nclass, degree): super(KGCNNew, self).__init__() self.Wx = GraphConvolution(nfeat, nhidden) self.W = nn.Linear(nhidden, nclass) self.d = degree def forward(self, input_0, input_1): primals_1 = self.Wx.weight primals_4 = self.Wx.bias primals_2 = self.W.weight primals_6 = self.W.bias primals_3 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
gear/gfnn
KGCN
false
15418
[ "MIT" ]
46
36667861caacba921469d43917d002896e832c3f
https://github.com/gear/gfnn/tree/36667861caacba921469d43917d002896e832c3f
L2NormLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/np/cnpwlr3beyjh6wkj6m5mcfmujk5zwfom2ueboydzgoynwj75nysc.py # Topologically Sorted Source Nodes: [sub, dist_in, sub_1, dist_out, sub_2, norm_2, loss], Original ATen: [aten.sub, aten.linalg_vector_norm, aten.div] # Source node to ATen node mapping: # dist_in => pow_1, pow_2, sum_1 # dist_out => pow_3, pow_4, sum_2 # loss => div # norm_2 => pow_5, pow_6, sum_3 # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg2_1, %arg3_1), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1], True), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_2, %pow_4), kwargs = {}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, None), kwargs = {}) # %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_6, 4), kwargs = {}) triton_per_fused_div_linalg_vector_norm_sub_0 = async_compile.triton('triton_per_fused_div_linalg_vector_norm_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_linalg_vector_norm_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_linalg_vector_norm_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None) tmp4 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp5 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None) tmp9 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp10 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None) tmp14 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp15 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None) tmp20 = tl.load(in_ptr2 + (r0 + (64*r1)), None) tmp21 = tl.load(in_ptr3 + (r0 + (64*r1)), None) tmp24 = tl.load(in_ptr2 + (16 + r0 + (64*r1)), None) tmp25 = tl.load(in_ptr3 + (16 + r0 + (64*r1)), None) tmp29 = tl.load(in_ptr2 + (32 + r0 + (64*r1)), None) tmp30 = tl.load(in_ptr3 + (32 + r0 + (64*r1)), None) tmp34 = tl.load(in_ptr2 + (48 + r0 + (64*r1)), None) tmp35 = tl.load(in_ptr3 + (48 + r0 + (64*r1)), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = libdevice.sqrt(tmp18) tmp22 = tmp20 - tmp21 tmp23 = tmp22 * tmp22 tmp26 = tmp24 - tmp25 tmp27 = tmp26 * tmp26 tmp28 = tmp23 + tmp27 tmp31 = tmp29 - tmp30 tmp32 = tmp31 * tmp31 tmp33 = tmp28 + tmp32 tmp36 = tmp34 - tmp35 tmp37 = tmp36 * tmp36 tmp38 = tmp33 + tmp37 tmp39 = libdevice.sqrt(tmp38) tmp40 = tmp19 - tmp39 tmp41 = tmp40 * tmp40 tmp42 = tl.broadcast_to(tmp41, [XBLOCK, RBLOCK]) tmp44 = tl.sum(tmp42, 1)[:, None] tmp45 = libdevice.sqrt(tmp44) tmp46 = 0.25 tmp47 = tmp45 * tmp46 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp47, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, 
arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [sub, dist_in, sub_1, dist_out, sub_2, norm_2, loss], Original ATen: [aten.sub, aten.linalg_vector_norm, aten.div] stream0 = get_raw_stream(0) triton_per_fused_div_linalg_vector_norm_sub_0.run(buf2, arg0_1, arg1_1, arg2_1, arg3_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch.nn as nn class L2NormLoss(nn.Module): def __init__(self): super(L2NormLoss, self).__init__() def forward(self, x1, x2, y1, y2): dist_in = torch.norm(x1 - x2, dim=1, keepdim=True) dist_out = torch.norm(y1 - y2, dim=1, keepdim=True) loss = torch.norm(dist_in - dist_out) / x1.size(0) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
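The loss above compares pairwise L2 distances in input and output space; a hedged plain-torch restatement (my sketch) that should agree with the module:

import torch

x1, x2, y1, y2 = (torch.rand(4, 4, 4, 4) for _ in range(4))
loss_fn = L2NormLoss()

d_in = (x1 - x2).pow(2).sum(dim=1, keepdim=True).sqrt()     # per-position L2
d_out = (y1 - y2).pow(2).sum(dim=1, keepdim=True).sqrt()
expected = (d_in - d_out).pow(2).sum().sqrt() / x1.size(0)  # global norm / N
assert torch.allclose(expected, loss_fn(x1, x2, y1, y2), atol=1e-6)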
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_linalg_vector_norm_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp10 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp15 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp20 = tl.load(in_ptr2 + (r0 + 64 * r1), None) tmp21 = tl.load(in_ptr3 + (r0 + 64 * r1), None) tmp24 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None) tmp25 = tl.load(in_ptr3 + (16 + r0 + 64 * r1), None) tmp29 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None) tmp30 = tl.load(in_ptr3 + (32 + r0 + 64 * r1), None) tmp34 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None) tmp35 = tl.load(in_ptr3 + (48 + r0 + 64 * r1), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = libdevice.sqrt(tmp18) tmp22 = tmp20 - tmp21 tmp23 = tmp22 * tmp22 tmp26 = tmp24 - tmp25 tmp27 = tmp26 * tmp26 tmp28 = tmp23 + tmp27 tmp31 = tmp29 - tmp30 tmp32 = tmp31 * tmp31 tmp33 = tmp28 + tmp32 tmp36 = tmp34 - tmp35 tmp37 = tmp36 * tmp36 tmp38 = tmp33 + tmp37 tmp39 = libdevice.sqrt(tmp38) tmp40 = tmp19 - tmp39 tmp41 = tmp40 * tmp40 tmp42 = tl.broadcast_to(tmp41, [XBLOCK, RBLOCK]) tmp44 = tl.sum(tmp42, 1)[:, None] tmp45 = libdevice.sqrt(tmp44) tmp46 = 0.25 tmp47 = tmp45 * tmp46 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp47, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_div_linalg_vector_norm_sub_0[grid(1)](buf2, arg0_1, arg1_1, arg2_1, arg3_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return buf2, class L2NormLossNew(nn.Module): def __init__(self): super(L2NormLossNew, self).__init__() def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
gfiumara/MSU-LatentAFIS
L2NormLoss
false
15419
[ "MIT" ]
53
682464b0bc4501977f1304c51e2638c0ee89d87c
https://github.com/gfiumara/MSU-LatentAFIS/tree/682464b0bc4501977f1304c51e2638c0ee89d87c
AttLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/36/c36xldnuc3t465xlrbmm4yvftzdrqqjr5avgkhmu7tkzbkcsyqp3.py # Topologically Sorted Source Nodes: [att_singal_1, att_singal_2, att_singal_3], Original ATen: [aten.relu, aten.mul, aten.sum] # Source node to ATen node mapping: # att_singal_1 => relu # att_singal_2 => mul # att_singal_3 => sum_1 # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %primals_3), kwargs = {}) # %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2]), kwargs = {}) triton_poi_fused_mul_relu_sum_0 = async_compile.triton('triton_poi_fused_mul_relu_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_relu_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_relu_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : 
tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp9 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp13 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 * tmp3 tmp6 = triton_helpers.maximum(tmp1, tmp5) tmp7 = tmp6 * tmp3 tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp1, tmp9) tmp11 = tmp10 * tmp3 tmp12 = tmp8 + tmp11 tmp14 = triton_helpers.maximum(tmp1, tmp13) tmp15 = tmp14 * tmp3 tmp16 = tmp12 + tmp15 tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/dm/cdmkcxuzpnailvibeivaikqdr4zvashgzwju7qijhq5aizlo3aor.py # Topologically Sorted Source Nodes: [att_singal_4], Original ATen: [aten._softmax] # Source node to ATen node mapping: # att_singal_4 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sum_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last') 
tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kt/cktghousutx6xui2sl2rvevzmb7gkacvfhntjq5n2xzeu7v57oz6.py # Topologically Sorted Source Nodes: [att_singal_4], Original ATen: [aten._softmax] # Source node to ATen node mapping: # att_singal_4 => div, sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [att_singal], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del 
primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [att_singal_1, att_singal_2, att_singal_3], Original ATen: [aten.relu, aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_mul_relu_sum_0.run(buf0, primals_3, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [att_singal_4], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [att_singal_4], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf2, buf3, 64, grid=grid(64), stream=stream0) del buf2 return (buf3, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf0, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
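For reference, the three generated kernels above implement relu, a scale by the context vector h, a sum over the embedding axis, and a numerically stable softmax. A minimal eager sketch of the same math with the benchmark shapes (hypothetical variable names, not part of the generated module):

import torch

x = torch.rand(4, 4, 4, 4)  # primals_2 in the benchmark harness
w = torch.rand(4, 4)        # primals_1: the linear weight
h = torch.rand(4)           # primals_3: the learned context vector

s = torch.relu(x @ w.t())                         # extern_kernels.mm plus the relu inside kernel 0
s = (s * h).sum(dim=2)                            # triton_poi_fused_mul_relu_sum_0
s = s - s.amax(dim=1, keepdim=True)               # triton_poi_fused__softmax_1: shift by the max for stability
att = s.exp() / s.exp().sum(dim=1, keepdim=True)  # triton_poi_fused__softmax_2: normalize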
import torch import torch.nn as nn import torch.nn.functional as fn class AttLayer(nn.Module): """Calculate the attention signal (weight) according to the input tensor. Args: infeatures (torch.FloatTensor): A 3D input tensor with shape of [batch_size, M, embed_dim]. Returns: torch.FloatTensor: Attention weight of the input, with shape [batch_size, M]. """ def __init__(self, in_dim, att_dim): super(AttLayer, self).__init__() self.in_dim = in_dim self.att_dim = att_dim self.w = torch.nn.Linear(in_features=in_dim, out_features=att_dim, bias=False) self.h = nn.Parameter(torch.randn(att_dim), requires_grad=True) def forward(self, infeatures): att_singal = self.w(infeatures) att_singal = fn.relu(att_singal) att_singal = torch.mul(att_singal, self.h) att_singal = torch.sum(att_singal, dim=2) att_singal = fn.softmax(att_singal, dim=1) return att_singal def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'att_dim': 4}]
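A minimal usage sketch for the eager module above, following get_inputs/get_init_inputs (runs on CPU; the docstring describes a 3D input, but the 4D test input works too, since the reduction is over dim 2 and the softmax over dim 1):

import torch

layer = AttLayer(in_dim=4, att_dim=4)
x = torch.rand(4, 4, 4, 4)       # get_inputs() shape; a [batch, M, embed] tensor also works
weights = layer(x)
print(weights.shape)             # torch.Size([4, 4, 4])
print(weights.sum(dim=1))        # all ones: the softmax normalizes over dim 1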
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_relu_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp9 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp13 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 * tmp3 tmp6 = triton_helpers.maximum(tmp1, tmp5) tmp7 = tmp6 * tmp3 tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp1, tmp9) tmp11 = tmp10 * tmp3 tmp12 = tmp8 + tmp11 tmp14 = triton_helpers.maximum(tmp1, tmp13) tmp15 = tmp14 * tmp3 tmp16 = tmp12 + tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = 
empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_relu_sum_0[grid(64)](buf0, primals_3, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 return buf3, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), buf0, buf3 class AttLayerNew(nn.Module): """Calculate the attention signal (weight) according to the input tensor. Args: infeatures (torch.FloatTensor): A 3D input tensor with shape of [batch_size, M, embed_dim]. Returns: torch.FloatTensor: Attention weight of the input, with shape [batch_size, M]. """ def __init__(self, in_dim, att_dim): super(AttLayerNew, self).__init__() self.in_dim = in_dim self.att_dim = att_dim self.w = torch.nn.Linear(in_features=in_dim, out_features=att_dim, bias=False) self.h = nn.Parameter(torch.randn(att_dim), requires_grad=True) def forward(self, input_0): primals_3 = self.h primals_1 = self.w.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
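A quick equivalence check between the eager module and the compiled wrapper (a sketch assuming a CUDA device, since the generated kernels are CUDA-only); copying the state dict keeps both parameter sets identical:

import torch

eager = AttLayer(4, 4).cuda()
fused = AttLayerNew(4, 4).cuda()
fused.load_state_dict(eager.state_dict())  # same parameter names: w.weight and h

x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(fused(x), eager(x))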
geekinglcq/HRec
AttLayer
false
15420
[ "MIT" ]
49
b3a67f7721e6e73a7af37d308b5b00e9df68d495
https://github.com/geekinglcq/HRec/tree/b3a67f7721e6e73a7af37d308b5b00e9df68d495
VisErrorLossV2
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/qe/cqe6m5n4wbbdqpxnyqeo6ipbpx2yvh7ye5ggiq7wmqu4yfd4g5f7.py # Topologically Sorted Source Nodes: [amplitude_1, amplitude], Original ATen: [aten.max] # Source node to ATen node mapping: # amplitude => max_1 # amplitude_1 => max_2 # Graph fragment: # %max_2 : [num_users=2] = call_function[target=torch.ops.aten.max.default](args = (%select_2,), kwargs = {}) # %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.default](args = (%select,), kwargs = {}) triton_per_fused_max_0 = async_compile.triton('triton_per_fused_max_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_max_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = 
tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0)) tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp3, None) tl.store(out_ptr1 + (tl.full([1], 0, tl.int32)), tmp3, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3i/c3ipz3uieuztninxw3dqqqqjpig2wg56q7uyiwfijdkqybdzlyjs.py # Topologically Sorted Source Nodes: [sub_1, diff_1, truediv_4, gt_1, vismap_1, eq_2, pos_ids_1, float_5, mul_4, sum_9, float_6, sum_11, truediv_5, le_1, eq_3, neg_ids_1, float_7, mul_5, sum_13, float_8, sum_15, sub, diff, truediv, gt, vismap, eq, pos_ids, float_1, mul, sum_1, float_2, sum_3, truediv_1, le, eq_1, neg_ids, float_3, mul_1, sum_5, float_4, sum_7], Original ATen: [aten.sub, aten.abs, aten.div, aten.gt, aten.repeat, aten.eq, aten.bitwise_and, aten._to_copy, aten.mul, aten.sum, aten.le] # Source node to ATen node mapping: # diff => abs_1 # diff_1 => abs_2 # eq => eq # eq_1 => eq_1 # eq_2 => eq_2 # eq_3 => eq_3 # float_1 => convert_element_type # float_2 => convert_element_type_1 # float_3 => convert_element_type_2 # float_4 => convert_element_type_3 # float_5 => convert_element_type_4 # float_6 => convert_element_type_5 # float_7 => convert_element_type_6 # float_8 => convert_element_type_7 # gt => gt # gt_1 => gt_1 # le => le # le_1 => le_1 # mul => mul # mul_1 => mul_1 # mul_4 => mul_4 # mul_5 => mul_5 # neg_ids => bitwise_and_1 # neg_ids_1 => bitwise_and_3 # pos_ids => bitwise_and # pos_ids_1 => bitwise_and_2 # sub => sub # sub_1 => sub_1 # sum_1 => sum_1 # sum_11 => sum_11 # sum_13 => sum_13 # sum_15 => sum_15 # sum_3 => sum_3 # sum_5 => sum_5 # sum_7 => sum_7 # sum_9 => sum_9 # truediv => div # truediv_1 => div_1 # truediv_4 => div_4 # truediv_5 => div_5 # vismap => repeat # vismap_1 => repeat_1 # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %view_4), kwargs = {}) # %abs_2 : [num_users=2] = call_function[target=torch.ops.aten.abs.default](args = (%sub_1,), kwargs = {}) # %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%max_2, 10), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%view_3, %div_4), kwargs = {}) # %repeat_1 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%arg2_1, [1, 1, 16]), kwargs = {}) # %eq_2 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%repeat_1, 1), kwargs = {}) # %bitwise_and_2 : [num_users=2] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%gt_1, %eq_2), kwargs = {}) # %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_2, torch.float32), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_2, %convert_element_type_4), kwargs = {}) # %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [2]), kwargs = {}) # %convert_element_type_5 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_2, torch.float32), kwargs = {}) # %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%convert_element_type_5, [2]), kwargs = {}) # %div_5 : [num_users=1] = 
call_function[target=torch.ops.aten.div.Tensor](args = (%max_2, 10), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Tensor](args = (%view_3, %div_5), kwargs = {}) # %eq_3 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%repeat_1, 1), kwargs = {}) # %bitwise_and_3 : [num_users=2] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%le_1, %eq_3), kwargs = {}) # %convert_element_type_6 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_3, torch.float32), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_2, %convert_element_type_6), kwargs = {}) # %sum_13 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_5, [2]), kwargs = {}) # %convert_element_type_7 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_3, torch.float32), kwargs = {}) # %sum_15 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%convert_element_type_7, [2]), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %view_1), kwargs = {}) # %abs_1 : [num_users=2] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%max_1, 10), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%view, %div), kwargs = {}) # %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%arg2_1, [1, 1, 16]), kwargs = {}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%repeat, 1), kwargs = {}) # %bitwise_and : [num_users=2] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%gt, %eq), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and, torch.float32), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, %convert_element_type), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2]), kwargs = {}) # %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and, torch.float32), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%convert_element_type_1, [2]), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%max_1, 10), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Tensor](args = (%view, %div_1), kwargs = {}) # %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%repeat, 1), kwargs = {}) # %bitwise_and_1 : [num_users=2] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%le, %eq_1), kwargs = {}) # %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_1, torch.float32), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, %convert_element_type_2), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [2]), kwargs = {}) # %convert_element_type_3 : [num_users=1] = 
call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_1, torch.float32), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%convert_element_type_3, [2]), kwargs = {}) triton_per_fused__to_copy_abs_bitwise_and_div_eq_gt_le_mul_repeat_sub_sum_1 = async_compile.triton('triton_per_fused__to_copy_abs_bitwise_and_div_eq_gt_le_mul_repeat_sub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: 'i32', 15: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_abs_bitwise_and_div_eq_gt_le_mul_repeat_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 8, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__to_copy_abs_bitwise_and_div_eq_gt_le_mul_repeat_sub_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 16 x1 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (r2 + (16*x3)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r2 + (16*x0) + (256*x1) + (256*((r2 + (16*x0)) // 256))), xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr2 + (0)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp11 = tl.load(in_ptr3 + (x3), xmask, eviction_policy='evict_last') tmp37 = tl.load(in_ptr4 + (x3), xmask, eviction_policy='evict_last') tmp41 = tl.load(in_ptr5 + (0)) tmp42 = tl.broadcast_to(tmp41, [XBLOCK, RBLOCK]) tmp2 = tl.full([1, 1], 0, tl.int32) tmp3 = triton_helpers.maximum(tmp2, tmp1) tmp4 = tmp0 - tmp3 tmp5 = tl_math.abs(tmp4) tmp8 = 0.1 tmp9 = tmp7 * tmp8 tmp10 = tmp0 > tmp9 tmp12 = 1.0 tmp13 = tmp11 == tmp12 tmp14 = tmp10 & tmp13 tmp15 = tmp14.to(tl.float32) tmp16 = tmp5 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 
1)[:, None] tmp21 = tmp0 <= tmp9 tmp22 = tmp21 & tmp13 tmp23 = tmp22.to(tl.float32) tmp24 = tmp5 * tmp23 tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.where(xmask, tmp25, 0) tmp28 = tl.sum(tmp27, 1)[:, None] tmp29 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp31 = tl.where(xmask, tmp29, 0) tmp32 = tl.sum(tmp31, 1)[:, None] tmp33 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp35 = tl.where(xmask, tmp33, 0) tmp36 = tl.sum(tmp35, 1)[:, None] tmp38 = triton_helpers.maximum(tmp2, tmp37) tmp39 = tmp0 - tmp38 tmp40 = tl_math.abs(tmp39) tmp43 = tmp42 * tmp8 tmp44 = tmp0 > tmp43 tmp45 = tmp44 & tmp13 tmp46 = tmp45.to(tl.float32) tmp47 = tmp40 * tmp46 tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK]) tmp50 = tl.where(xmask, tmp48, 0) tmp51 = tl.sum(tmp50, 1)[:, None] tmp52 = tmp0 <= tmp43 tmp53 = tmp52 & tmp13 tmp54 = tmp53.to(tl.float32) tmp55 = tmp40 * tmp54 tmp56 = tl.broadcast_to(tmp55, [XBLOCK, RBLOCK]) tmp58 = tl.where(xmask, tmp56, 0) tmp59 = tl.sum(tmp58, 1)[:, None] tmp60 = tl.broadcast_to(tmp46, [XBLOCK, RBLOCK]) tmp62 = tl.where(xmask, tmp60, 0) tmp63 = tl.sum(tmp62, 1)[:, None] tmp64 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK]) tmp66 = tl.where(xmask, tmp64, 0) tmp67 = tl.sum(tmp66, 1)[:, None] tl.store(out_ptr0 + (x3), tmp20, xmask) tl.store(out_ptr1 + (x3), tmp28, xmask) tl.store(out_ptr2 + (x3), tmp32, xmask) tl.store(out_ptr3 + (x3), tmp36, xmask) tl.store(out_ptr4 + (x3), tmp51, xmask) tl.store(out_ptr5 + (x3), tmp59, xmask) tl.store(out_ptr6 + (x3), tmp63, xmask) tl.store(out_ptr7 + (x3), tmp67, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/w7/cw7gjf4k3utqy4bwtidvkicayfzpm3dy3plvdqif4rmiom6l4rt2.py # Topologically Sorted Source Nodes: [sum_10, sum_12, add_4, pos_loss_1, mul_6, sum_14, sum_16, add_5, neg_loss_1, mul_7, total_loss_1], Original ATen: [aten.sum, aten.add, aten.div, aten.mul] # Source node to ATen node mapping: # add_4 => add_4 # add_5 => add_5 # mul_6 => mul_6 # mul_7 => mul_7 # neg_loss_1 => div_7 # pos_loss_1 => div_6 # sum_10 => sum_10 # sum_12 => sum_12 # sum_14 => sum_14 # sum_16 => sum_16 # total_loss_1 => add_6 # Graph fragment: # %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_9, [0]), kwargs = {}) # %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_11, [0]), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_12, 0.0001), kwargs = {}) # %div_6 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_10, %add_4), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_6, 0.5), kwargs = {}) # %sum_14 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_13, [0]), kwargs = {}) # %sum_16 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_15, [0]), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_16, 0.0001), kwargs = {}) # %div_7 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_14, %add_5), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_7, 0.5), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %mul_7), kwargs = {}) triton_poi_fused_add_div_mul_sum_2 = async_compile.triton('triton_poi_fused_add_div_mul_sum_2', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0), xmask) tmp7 = tl.load(in_ptr1 + (x0), xmask) tmp8 = tl.load(in_ptr1 + (16 + x0), xmask) tmp10 = tl.load(in_ptr1 + (32 + x0), xmask) tmp12 = tl.load(in_ptr1 + (48 + x0), xmask) tmp19 = tl.load(in_ptr2 + (x0), xmask) tmp20 = tl.load(in_ptr2 + (16 + x0), xmask) tmp22 = tl.load(in_ptr2 + (32 + x0), xmask) tmp24 = tl.load(in_ptr2 + (48 + x0), xmask) tmp26 = tl.load(in_ptr3 + (x0), xmask) tmp27 = tl.load(in_ptr3 + (16 + x0), xmask) tmp29 = tl.load(in_ptr3 + (32 + x0), xmask) tmp31 = tl.load(in_ptr3 + (48 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp9 = tmp7 + tmp8 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp14 = 0.0001 tmp15 = tmp13 + tmp14 tmp16 = tmp6 / tmp15 tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp21 = tmp19 + tmp20 tmp23 = tmp21 + tmp22 tmp25 = tmp23 + tmp24 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp32 + tmp14 tmp34 = tmp25 / tmp33 tmp35 = tmp34 * tmp17 tmp36 = tmp18 + tmp35 tl.store(out_ptr0 + (x0), tmp36, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/la/clartgx64xp7tcle7b7taoq3ylrpldz6d5i56ezbo73c3sskufis.py # Topologically Sorted Source Nodes: [loss2], Original ATen: [aten.mean] # Source node to ATen node mapping: # loss2 => mean_1 # Graph fragment: # %mean_1 : [num_users=2] = call_function[target=torch.ops.aten.mean.default](args = (%getitem,), kwargs = {}) triton_per_fused_mean_3 = async_compile.triton('triton_per_fused_mean_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 8], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 8 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp3, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mn/cmnsxlpoznd2xtq6qfuwjq4af64wagxwfhob4zeyihbzc76iy43d.py # Topologically Sorted Source Nodes: [sum_2, sum_4, add, pos_loss, mul_2, sum_6, sum_8, add_1, neg_loss, mul_3, total_loss, mean, loss1, loss2, add_7], Original ATen: [aten.sum, aten.add, aten.div, aten.mul, aten.mean] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_7 => add_7 # loss1 => add_3 # loss2 => mean_1 # mean => mean # mul_2 => mul_2 # mul_3 => mul_3 # neg_loss => div_3 # pos_loss => div_2 # sum_2 => sum_2 # sum_4 => sum_4 # sum_6 => sum_6 # sum_8 => sum_8 # total_loss => add_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_1, [0]), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_3, [0]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_4, 0.0001), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, %add), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, 0.5), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_5, [0]), kwargs = {}) # %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_7, [0]), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_8, 0.0001), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_6, %add_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_3, 0.5), kwargs = {}) # %add_2 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_2,), kwargs = {}) # %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 0), kwargs = {}) # %mean_1 : [num_users=2] = call_function[target=torch.ops.aten.mean.default](args = (%getitem,), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %mean_1), kwargs = {}) triton_per_fused_add_div_mean_mul_sum_4 = async_compile.triton('triton_per_fused_add_div_mean_mul_sum_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {7: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 8), equal_to_1=(7,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_sum_4', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_mul_sum_4(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr0 + (16 + r0), None) tmp3 = tl.load(in_ptr0 + (32 + r0), None) tmp5 = tl.load(in_ptr0 + (48 + r0), None) tmp7 = tl.load(in_ptr1 + (r0), None) tmp8 = tl.load(in_ptr1 + (16 + r0), None) tmp10 = tl.load(in_ptr1 + (32 + r0), None) tmp12 = tl.load(in_ptr1 + (48 + r0), None) tmp19 = tl.load(in_ptr2 + (r0), None) tmp20 = tl.load(in_ptr2 + (16 + r0), None) tmp22 = tl.load(in_ptr2 + (32 + r0), None) tmp24 = tl.load(in_ptr2 + (48 + r0), None) tmp26 = tl.load(in_ptr3 + (r0), None) tmp27 = tl.load(in_ptr3 + (16 + r0), None) tmp29 = tl.load(in_ptr3 + (32 + r0), None) tmp31 = tl.load(in_ptr3 + (48 + r0), None) tmp44 = tl.load(in_out_ptr1 + (0)) tmp45 = tl.broadcast_to(tmp44, [XBLOCK, 1]) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp9 = tmp7 + tmp8 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp14 = 0.0001 tmp15 = tmp13 + tmp14 tmp16 = tmp6 / tmp15 
tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp21 = tmp19 + tmp20 tmp23 = tmp21 + tmp22 tmp25 = tmp23 + tmp24 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp32 + tmp14 tmp34 = tmp25 / tmp33 tmp35 = tmp34 * tmp17 tmp36 = tmp18 + tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = tl.sum(tmp37, 1)[:, None] tmp40 = 16.0 tmp41 = tmp39 / tmp40 tmp42 = 0.0 tmp43 = tmp41 + tmp42 tmp46 = 8.0 tmp47 = tmp45 / tmp46 tmp48 = tmp43 + tmp47 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp43, None) tl.debug_barrier() tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp47, None) tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp48, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 16, 4, 4), (1024, 256, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 16, 1), (16, 1, 1)) assert_size_stride(arg3_1, (4, 4, 16, 4), (256, 64, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf9 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [amplitude_1, amplitude], Original ATen: [aten.max] stream0 = get_raw_stream(0) triton_per_fused_max_0.run(arg0_1, buf0, buf9, 1, 1024, grid=grid(1), stream=stream0) buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32) buf3 = empty_strided_cuda((4, 16), (16, 1), torch.float32) buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32) buf4 = empty_strided_cuda((4, 16), (16, 1), torch.float32) buf10 = empty_strided_cuda((4, 16), (16, 1), torch.float32) buf12 = empty_strided_cuda((4, 16), (16, 1), torch.float32) buf11 = empty_strided_cuda((4, 16), (16, 1), torch.float32) buf13 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [sub_1, diff_1, truediv_4, gt_1, vismap_1, eq_2, pos_ids_1, float_5, mul_4, sum_9, float_6, sum_11, truediv_5, le_1, eq_3, neg_ids_1, float_7, mul_5, sum_13, float_8, sum_15, sub, diff, truediv, gt, vismap, eq, pos_ids, float_1, mul, sum_1, float_2, sum_3, truediv_1, le, eq_1, neg_ids, float_3, mul_1, sum_5, float_4, sum_7], Original ATen: [aten.sub, aten.abs, aten.div, aten.gt, aten.repeat, aten.eq, aten.bitwise_and, aten._to_copy, aten.mul, aten.sum, aten.le] triton_per_fused__to_copy_abs_bitwise_and_div_eq_gt_le_mul_repeat_sub_sum_1.run(arg0_1, arg3_1, buf0, arg2_1, arg1_1, buf9, buf1, buf3, buf2, buf4, buf10, buf12, buf11, buf13, 64, 16, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 del arg2_1 del arg3_1 buf5 = empty_strided_cuda((16, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [sum_10, sum_12, add_4, pos_loss_1, mul_6, sum_14, sum_16, add_5, neg_loss_1, mul_7, total_loss_1], Original ATen: [aten.sum, aten.add, aten.div, aten.mul] triton_poi_fused_add_div_mul_sum_2.run(buf1, buf2, buf3, buf4, buf5, 16, grid=grid(16), stream=stream0) del buf1 del buf2 del buf3 del buf4 # Topologically Sorted Source Nodes: [topk], Original ATen: [aten.topk] buf6 = torch.ops.aten.topk.default(buf5, 8) del buf5 buf7 = buf6[0] del buf6 buf17 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [loss2], Original ATen: [aten.mean] triton_per_fused_mean_3.run(buf7, buf17, 1, 8, grid=grid(1), stream=stream0) del buf7 buf15 = buf0; del buf0 # reuse buf16 = buf15; del buf15 # reuse buf18 = buf17; del buf17 # reuse buf19 = empty_strided_cuda((), (), 
torch.float32) # Topologically Sorted Source Nodes: [sum_2, sum_4, add, pos_loss, mul_2, sum_6, sum_8, add_1, neg_loss, mul_3, total_loss, mean, loss1, loss2, add_7], Original ATen: [aten.sum, aten.add, aten.div, aten.mul, aten.mean] triton_per_fused_add_div_mean_mul_sum_4.run(buf16, buf18, buf10, buf11, buf12, buf13, buf19, 1, 16, grid=grid(1), stream=stream0) del buf10 del buf11 del buf12 del buf13 return (buf19, buf16, buf18, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 16, 4, 4), (1024, 256, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 16, 1), (16, 1, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 4, 16, 4), (256, 64, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
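The aten.topk call above (buf6 = torch.ops.aten.topk.default(buf5, 8)) is the OHEM branch of the loss specialized for ohem=0.5 with 16 keypoints: only the 8 largest per-keypoint losses feed the mean that becomes loss2. In eager terms, mirroring the source below:

k = int(total_loss.size(0) * 0.5)  # 16 keypoints -> keep the 8 hardest
total_loss, _ = total_loss.topk(k)
loss2 = total_loss.mean()          # computed by triton_per_fused_mean_3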
import torch import torch.nn.functional as F from torch import nn class VisErrorLossV2(nn.Module): def __init__(self): super(VisErrorLossV2, self).__init__() def compute_l1_weighted_loss(self, hm_targets, hm_preds, vismap, ohem=1.0): """ :param hm_targets: [batch size, keypoint number, h, w] :param hm_preds: [batch size, keypoint number, h, w] :param vismap: [batch size, keypoint number] :return: """ epsilon = 0.0001 hm_preds = F.relu(hm_preds, False) amplitude = torch.max(hm_targets) b, k, h, w = hm_targets.size() hm_targets = hm_targets.view(b, k, -1) hm_preds = hm_preds.view(b, k, -1) vismap = vismap.view(b, k, 1).repeat(1, 1, h * w) pos_ids = (hm_targets > amplitude / 10) & (vismap == 1) neg_ids = (hm_targets <= amplitude / 10) & (vismap == 1) diff = (hm_targets - hm_preds).abs() pos_loss = (diff * pos_ids.float()).sum(2).sum(0) / (pos_ids.float( ).sum(2).sum(0) + epsilon) neg_loss = (diff * neg_ids.float()).sum(2).sum(0) / (neg_ids.float( ).sum(2).sum(0) + epsilon) total_loss = 0.5 * pos_loss + 0.5 * neg_loss if ohem < 1: k = int(total_loss.size(0) * ohem) total_loss, _ = total_loss.topk(k) return total_loss.mean() def compute_l2_loss(self, hm_targets, hm_preds, vismap, ohem=1.0): """ :param hm_targets: [batch size, keypoint number, h, w] :param hm_preds: [batch size, keypoint number, h, w] :param vismap: [batch size, keypoint number] :return: """ epsilon = 0.0001 hm_preds = F.relu(hm_preds, False) b, k, h, w = hm_targets.size() hm_targets = hm_targets.view(b, k, -1) hm_preds = hm_preds.view(b, k, -1) vismap = vismap.view(b, k, 1).repeat(1, 1, h * w) ids = vismap == 1 diff = (hm_targets - hm_preds) ** 2 total_loss = (diff * ids.float()).sum(2).sum(0) / (ids.float().sum( 2).sum(0) + epsilon) if ohem < 1: k = int(total_loss.size(0) * ohem) total_loss, _ = total_loss.topk(k) return total_loss.mean() def forward(self, hm_targets, hm_preds1, hm_preds2, vismap): """ :param hm_targets: list of 4 elements, each is [batch size, keypoint number, h, w] :param hm_preds1: list of 4 elements, each is [batch size, keypoint number, h, w] :param vismap: [batch size, keypoint number] :return: """ loss1 = 0 for t, p in zip(hm_targets, hm_preds1): loss1 += self.compute_l1_weighted_loss(t, p, vismap) break loss2 = self.compute_l1_weighted_loss(hm_targets[0], hm_preds2, vismap, ohem=0.5) return loss1 + loss2, loss1, loss2 def get_inputs(): return [torch.rand([4, 4, 16, 4, 4]), torch.rand([4, 4, 4, 4]), torch. rand([4, 4, 16, 4]), torch.rand([4, 16, 1])] def get_init_inputs(): return [[], {}]
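A minimal usage sketch with the get_inputs shapes (hypothetical data; note that a vismap drawn from rand, as in get_inputs, almost never equals exactly 1, so the visibility masks come out empty and the losses collapse to zero; a binary map is used here instead):

import torch

criterion = VisErrorLossV2()
hm_targets = torch.rand(4, 4, 16, 4, 4)  # iterated as 4 target maps of [4, 16, 4, 4]
hm_preds1 = torch.rand(4, 4, 4, 4)
hm_preds2 = torch.rand(4, 4, 16, 4)
vismap = (torch.rand(4, 16, 1) > 0.5).float()  # 1.0 marks a visible keypoint

total, loss1, loss2 = criterion(hm_targets, hm_preds1, hm_preds2, vismap)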
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn.functional as F from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_max_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp3, None) tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp3, None) @triton.jit def triton_per_fused__to_copy_abs_bitwise_and_div_eq_gt_le_mul_repeat_sub_sum_1( in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 16 x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r2 + 16 * x0 + 256 * x1 + 256 * ((r2 + 16 * x0) // 256)), xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr2 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp11 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last') tmp37 = tl.load(in_ptr4 + x3, xmask, eviction_policy='evict_last') tmp41 = tl.load(in_ptr5 + 0) tmp42 = tl.broadcast_to(tmp41, [XBLOCK, RBLOCK]) tmp2 = tl.full([1, 1], 0, tl.int32) tmp3 = triton_helpers.maximum(tmp2, tmp1) tmp4 = tmp0 - tmp3 tmp5 = tl_math.abs(tmp4) tmp8 = 0.1 tmp9 = tmp7 * tmp8 tmp10 = tmp0 > tmp9 tmp12 = 1.0 tmp13 = tmp11 == tmp12 tmp14 = tmp10 & tmp13 tmp15 = tmp14.to(tl.float32) tmp16 = tmp5 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tmp21 = tmp0 <= tmp9 tmp22 = tmp21 & tmp13 tmp23 = tmp22.to(tl.float32) tmp24 = tmp5 * tmp23 tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.where(xmask, tmp25, 0) tmp28 = tl.sum(tmp27, 1)[:, None] tmp29 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp31 = tl.where(xmask, tmp29, 0) tmp32 = tl.sum(tmp31, 1)[:, None] tmp33 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp35 = tl.where(xmask, tmp33, 0) tmp36 = tl.sum(tmp35, 1)[:, None] tmp38 = triton_helpers.maximum(tmp2, tmp37) tmp39 = tmp0 - tmp38 tmp40 = tl_math.abs(tmp39) tmp43 = tmp42 * tmp8 tmp44 = tmp0 > tmp43 tmp45 = tmp44 & tmp13 tmp46 = tmp45.to(tl.float32) tmp47 = tmp40 * tmp46 tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK]) tmp50 = tl.where(xmask, tmp48, 0) tmp51 = tl.sum(tmp50, 1)[:, None] tmp52 = tmp0 <= tmp43 tmp53 = tmp52 & tmp13 tmp54 = tmp53.to(tl.float32) tmp55 = tmp40 * tmp54 tmp56 = tl.broadcast_to(tmp55, [XBLOCK, RBLOCK]) tmp58 = tl.where(xmask, tmp56, 0) tmp59 = tl.sum(tmp58, 1)[:, None] tmp60 = tl.broadcast_to(tmp46, [XBLOCK, RBLOCK]) tmp62 = tl.where(xmask, tmp60, 0) tmp63 
= tl.sum(tmp62, 1)[:, None] tmp64 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK]) tmp66 = tl.where(xmask, tmp64, 0) tmp67 = tl.sum(tmp66, 1)[:, None] tl.store(out_ptr0 + x3, tmp20, xmask) tl.store(out_ptr1 + x3, tmp28, xmask) tl.store(out_ptr2 + x3, tmp32, xmask) tl.store(out_ptr3 + x3, tmp36, xmask) tl.store(out_ptr4 + x3, tmp51, xmask) tl.store(out_ptr5 + x3, tmp59, xmask) tl.store(out_ptr6 + x3, tmp63, xmask) tl.store(out_ptr7 + x3, tmp67, xmask) @triton.jit def triton_poi_fused_add_div_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (16 + x0), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0), xmask) tmp7 = tl.load(in_ptr1 + x0, xmask) tmp8 = tl.load(in_ptr1 + (16 + x0), xmask) tmp10 = tl.load(in_ptr1 + (32 + x0), xmask) tmp12 = tl.load(in_ptr1 + (48 + x0), xmask) tmp19 = tl.load(in_ptr2 + x0, xmask) tmp20 = tl.load(in_ptr2 + (16 + x0), xmask) tmp22 = tl.load(in_ptr2 + (32 + x0), xmask) tmp24 = tl.load(in_ptr2 + (48 + x0), xmask) tmp26 = tl.load(in_ptr3 + x0, xmask) tmp27 = tl.load(in_ptr3 + (16 + x0), xmask) tmp29 = tl.load(in_ptr3 + (32 + x0), xmask) tmp31 = tl.load(in_ptr3 + (48 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp9 = tmp7 + tmp8 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp14 = 0.0001 tmp15 = tmp13 + tmp14 tmp16 = tmp6 / tmp15 tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp21 = tmp19 + tmp20 tmp23 = tmp21 + tmp22 tmp25 = tmp23 + tmp24 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp32 + tmp14 tmp34 = tmp25 / tmp33 tmp35 = tmp34 * tmp17 tmp36 = tmp18 + tmp35 tl.store(out_ptr0 + x0, tmp36, xmask) @triton.jit def triton_per_fused_mean_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. 
constexpr): RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None) @triton.jit def triton_per_fused_add_div_mean_mul_sum_4(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (16 + r0), None) tmp3 = tl.load(in_ptr0 + (32 + r0), None) tmp5 = tl.load(in_ptr0 + (48 + r0), None) tmp7 = tl.load(in_ptr1 + r0, None) tmp8 = tl.load(in_ptr1 + (16 + r0), None) tmp10 = tl.load(in_ptr1 + (32 + r0), None) tmp12 = tl.load(in_ptr1 + (48 + r0), None) tmp19 = tl.load(in_ptr2 + r0, None) tmp20 = tl.load(in_ptr2 + (16 + r0), None) tmp22 = tl.load(in_ptr2 + (32 + r0), None) tmp24 = tl.load(in_ptr2 + (48 + r0), None) tmp26 = tl.load(in_ptr3 + r0, None) tmp27 = tl.load(in_ptr3 + (16 + r0), None) tmp29 = tl.load(in_ptr3 + (32 + r0), None) tmp31 = tl.load(in_ptr3 + (48 + r0), None) tmp44 = tl.load(in_out_ptr1 + 0) tmp45 = tl.broadcast_to(tmp44, [XBLOCK, 1]) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp9 = tmp7 + tmp8 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp14 = 0.0001 tmp15 = tmp13 + tmp14 tmp16 = tmp6 / tmp15 tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp21 = tmp19 + tmp20 tmp23 = tmp21 + tmp22 tmp25 = tmp23 + tmp24 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp32 + tmp14 tmp34 = tmp25 / tmp33 tmp35 = tmp34 * tmp17 tmp36 = tmp18 + tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = tl.sum(tmp37, 1)[:, None] tmp40 = 16.0 tmp41 = tmp39 / tmp40 tmp42 = 0.0 tmp43 = tmp41 + tmp42 tmp46 = 8.0 tmp47 = tmp45 / tmp46 tmp48 = tmp43 + tmp47 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp43, None) tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp47, None) tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp48, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 16, 4, 4), (1024, 256, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 16, 1), (16, 1, 1)) assert_size_stride(arg3_1, (4, 4, 16, 4), (256, 64, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf9 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_max_0[grid(1)](arg0_1, buf0, buf9, 1, 1024, num_warps=8, num_stages=1) buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32) buf3 = empty_strided_cuda((4, 16), (16, 1), torch.float32) buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32) buf4 = empty_strided_cuda((4, 16), (16, 1), torch.float32) buf10 = empty_strided_cuda((4, 16), (16, 1), torch.float32) buf12 = empty_strided_cuda((4, 16), (16, 1), torch.float32) buf11 = empty_strided_cuda((4, 16), (16, 1), torch.float32) buf13 = empty_strided_cuda((4, 16), (16, 1), torch.float32) 
        triton_per_fused__to_copy_abs_bitwise_and_div_eq_gt_le_mul_repeat_sub_sum_1[
            grid(64)](arg0_1, arg3_1, buf0, arg2_1, arg1_1, buf9, buf1,
            buf3, buf2, buf4, buf10, buf12, buf11, buf13, 64, 16, XBLOCK=1,
            num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
        del arg3_1
        buf5 = empty_strided_cuda((16,), (1,), torch.float32)
        triton_poi_fused_add_div_mul_sum_2[grid(16)](buf1, buf2, buf3,
            buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del buf1
        del buf2
        del buf3
        del buf4
        buf6 = torch.ops.aten.topk.default(buf5, 8)
        del buf5
        buf7 = buf6[0]
        del buf6
        buf17 = buf9
        del buf9
        triton_per_fused_mean_3[grid(1)](buf7, buf17, 1, 8, XBLOCK=1,
            num_warps=2, num_stages=1)
        del buf7
        buf15 = buf0
        del buf0
        buf16 = buf15
        del buf15
        buf18 = buf17
        del buf17
        buf19 = empty_strided_cuda((), (), torch.float32)
        triton_per_fused_add_div_mean_mul_sum_4[grid(1)](buf16, buf18,
            buf10, buf11, buf12, buf13, buf19, 1, 16, XBLOCK=1,
            num_warps=2, num_stages=1)
        del buf10
        del buf11
        del buf12
        del buf13
    return buf19, buf16, buf18


class VisErrorLossV2New(nn.Module):

    def __init__(self):
        super(VisErrorLossV2New, self).__init__()

    def compute_l1_weighted_loss(self, hm_targets, hm_preds, vismap, ohem=1.0):
        """
        :param hm_targets: [batch size, keypoint number, h, w]
        :param hm_preds: [batch size, keypoint number, h, w]
        :param vismap: [batch size, keypoint number]
        :return:
        """
        epsilon = 0.0001
        hm_preds = F.relu(hm_preds, False)
        amplitude = torch.max(hm_targets)
        b, k, h, w = hm_targets.size()
        hm_targets = hm_targets.view(b, k, -1)
        hm_preds = hm_preds.view(b, k, -1)
        vismap = vismap.view(b, k, 1).repeat(1, 1, h * w)
        pos_ids = (hm_targets > amplitude / 10) & (vismap == 1)
        neg_ids = (hm_targets <= amplitude / 10) & (vismap == 1)
        diff = (hm_targets - hm_preds).abs()
        pos_loss = (diff * pos_ids.float()).sum(2).sum(0) / (pos_ids.float()
            .sum(2).sum(0) + epsilon)
        neg_loss = (diff * neg_ids.float()).sum(2).sum(0) / (neg_ids.float()
            .sum(2).sum(0) + epsilon)
        total_loss = 0.5 * pos_loss + 0.5 * neg_loss
        if ohem < 1:
            k = int(total_loss.size(0) * ohem)
            total_loss, _ = total_loss.topk(k)
        return total_loss.mean()

    def compute_l2_loss(self, hm_targets, hm_preds, vismap, ohem=1.0):
        """
        :param hm_targets: [batch size, keypoint number, h, w]
        :param hm_preds: [batch size, keypoint number, h, w]
        :param vismap: [batch size, keypoint number]
        :return:
        """
        epsilon = 0.0001
        hm_preds = F.relu(hm_preds, False)
        b, k, h, w = hm_targets.size()
        hm_targets = hm_targets.view(b, k, -1)
        hm_preds = hm_preds.view(b, k, -1)
        vismap = vismap.view(b, k, 1).repeat(1, 1, h * w)
        ids = vismap == 1
        diff = (hm_targets - hm_preds) ** 2
        total_loss = (diff * ids.float()).sum(2).sum(0) / (ids.float().sum(2)
            .sum(0) + epsilon)
        if ohem < 1:
            k = int(total_loss.size(0) * ohem)
            total_loss, _ = total_loss.topk(k)
        return total_loss.mean()

    def forward(self, input_0, input_1, input_2, input_3):
        arg0_1 = input_0
        arg1_1 = input_1
        arg3_1 = input_2
        arg2_1 = input_3
        output = call([arg0_1, arg1_1, arg2_1, arg3_1])
        return output[0], output[1], output[2]
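# A minimal eager-mode sketch (not part of the generated code above) of the OHEM tail
# shared by compute_l1_weighted_loss and compute_l2_loss: keep only the hardest `ohem`
# fraction of per-keypoint losses before averaging. The hard-coded
# torch.ops.aten.topk.default(buf5, 8) in call() appears to be this step specialized
# to 16 keypoints with ohem=0.5. The helper name below is hypothetical.
def _ohem_mean(per_keypoint_loss, ohem=0.5):
    if ohem < 1:
        k = int(per_keypoint_loss.size(0) * ohem)
        per_keypoint_loss, _ = per_keypoint_loss.topk(k)  # hardest k keypoints only
    return per_keypoint_loss.mean()

# e.g. _ohem_mean(torch.tensor([4.0, 1.0, 3.0, 2.0]), ohem=0.5) averages only 4.0 and 3.0.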
gathierry/FashionAI-KeyPointsDetectionOfApparel
VisErrorLossV2
false
15,421
[ "Apache-2.0" ]
174
2e0942b42b4a9cd974cdddc151675738dc8a8cb4
https://github.com/gathierry/FashionAI-KeyPointsDetectionOfApparel/tree/2e0942b42b4a9cd974cdddc151675738dc8a8cb4
RobertaOutput
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ai/cai32p2ssjvpyulvuzcicdszqe3thbavgxn4jeed6uatjnl7yq2s.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 
tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/dt/cdtfyiepqseugx5m3udiopa26uo6fdp2fyvmfcoipxuqyqqnb2l6.py # Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # hidden_states_2 => add_1, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1.0 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mn/cmntyljhuirhsdjg2yosgzllpkpxqedxgoyk6gunquq2rf3kl7u5.py # Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # hidden_states_2 => add_1, 
add_2, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_5), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_6), kwargs = {}) triton_poi_fused_native_layer_norm_2 = async_compile.triton('triton_poi_fused_native_layer_norm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(buf1, primals_2, primals_4, 256, grid=grid(256), stream=stream0) del primals_2 del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(buf1, buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_2.run(buf1, buf2, buf3, primals_5, primals_6, buf4, 256, grid=grid(256), stream=stream0) del buf2 del buf3 del primals_6 return (buf4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
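# Eager sketch (illustration only, not generated code) of what the two layer-norm
# kernels above compute: triton_poi_fused_native_layer_norm_1 produces the per-row
# mean and rsqrt(biased variance + eps), and triton_poi_fused_native_layer_norm_2
# applies them together with the affine parameters. The literal 1.0 added before
# rsqrt is the model's layer_norm_eps.
import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)
w, b = torch.randn(4), torch.randn(4)
mean = x.mean(-1, keepdim=True)                                       # kernel 1: tmp8
inv_std = torch.rsqrt(x.var(-1, unbiased=False, keepdim=True) + 1.0)  # kernel 1: tmp23
y = (x - mean) * inv_std * w + b                                      # kernel 2: tmp8
assert torch.allclose(y, F.layer_norm(x, (4,), w, b, eps=1.0), atol=1e-6)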
from _paritybench_helpers import _mock_config
import torch
from torch import nn
import torch.utils.checkpoint


class RobertaOutput(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.
            layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'config': _mock_config(intermediate_size=4, hidden_size=4,
        layer_norm_eps=1, hidden_dropout_prob=0.5)}]
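# Hypothetical usage sketch: RobertaOutput only reads four attributes off `config`,
# so any namespace object can stand in for _mock_config.
from types import SimpleNamespace

cfg = SimpleNamespace(intermediate_size=4, hidden_size=4, layer_norm_eps=1,
    hidden_dropout_prob=0.5)
layer = RobertaOutput(cfg).eval()  # eval() makes the dropout a no-op
out = layer(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))  # dense -> +residual -> LayerNorm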
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1.0
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(256)](buf1, primals_2, primals_4, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        del primals_4
        buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        triton_poi_fused_native_layer_norm_1[grid(64)](buf1, buf2, buf3,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_2[grid(256)](buf1, buf2, buf3,
            primals_5, primals_6, buf4, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        del buf2
        del buf3
        del primals_6
    return buf4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), buf1


class RobertaOutputNew(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.
            layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_0, input_1):
        primals_1 = self.dense.weight
        primals_2 = self.dense.bias
        primals_5 = self.LayerNorm.weight
        primals_6 = self.LayerNorm.bias
        primals_3 = input_0
        primals_4 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6])
        return output[0]
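# Hedged sanity check, assuming a CUDA device and reusing RobertaOutput/_mock_config
# from the eager listing above: the traced graph dropped dropout, so the comparison
# is done in eval mode, where nn.Dropout is also the identity.
cfg = _mock_config(intermediate_size=4, hidden_size=4, layer_norm_eps=1,
    hidden_dropout_prob=0.5)
eager = RobertaOutput(cfg).cuda().eval()
compiled = RobertaOutputNew(cfg).cuda()
compiled.load_state_dict(eager.state_dict())  # identical parameter layout
h = torch.rand(4, 4, 4, 4, device='cuda')
resid = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(compiled(h, resid), eager(h, resid))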
IntelLabs/Model-Compression-Research-Package
RobertaOutput
false
15,422
[ "Apache-2.0" ]
58
69aecbf5cc73b10fab88a13d8ca6d8314d284c0b
https://github.com/IntelLabs/Model-Compression-Research-Package/tree/69aecbf5cc73b10fab88a13d8ca6d8314d284c0b
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/c7/cc7nzp6frw2bfmf3e4bewyyp25nim5n62waymtfhklsku7ifuyac.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 128 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 8 y1 = (yindex // 8) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (8*x2) + (72*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/7f/c7fyqczlcrwdsyxgwrbl6zg3qqldo6kvvpsrx3c7fkdiwolsaexa.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 256 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 16 y1 = (yindex // 16) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (16*x2) + (144*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/qu/cqugvzv6lrf2m6uwzc5zmg3xhqknj6c6dlqkxlmvriplsh2hzrof.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 384 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 16 y1 = (yindex // 16) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (16*x2) + (144*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/zl/czlyswam27u52myc4t5fjvoz4swd2lusoan4jvvdcr2t5w2zc5g2.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 576 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 24 y1 = (yindex // 24) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (24*x2) + (216*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/5y/c5ywbgnjkvm6s6xjacf3k5g527gbslbtbkqhjvy6ws7z7vivihpk.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', ''' import triton import triton.language as tl 
from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 960 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 24 y1 = (yindex // 24) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (24*x2) + (216*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/hp/chponyzcswbmtcjajxpyfuodvhjomobm63jqkv5dxmiq2cbmjkup.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 3200 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 40 y1 = (yindex // 40) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (40*x2) + (360*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/t3/ct3lwup5dtlg7dqvltbu33azvptwe2grfz7bso3rpyrgtg4gumi3.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_6 = async_compile.triton('triton_poi_fused_convolution_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32, 8192], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_6(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 32 xnumel = 4900 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 8 y1 = (yindex // 8) tmp0 = tl.load(in_ptr0 + (x2 + (4900*y3)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (y0 + (8*x2) + (39200*y1)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ba/cbaflnvvab6f4mlzsmlffeeeogsfxnwiir2k44ii7a6hdjqgq3jm.py # Topologically Sorted Source Nodes: [prelu], 
Original ATen: [aten._prelu_kernel] # Source node to ATen node mapping: # prelu => gt, mul, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %convolution), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) triton_poi_fused__prelu_kernel_7 = async_compile.triton('triton_poi_fused__prelu_kernel_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 156800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr1 + (0)) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = tmp4 * tmp0 tmp6 = tl.where(tmp2, tmp0, tmp5) tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/tb/ctbec5d352es5vkqspaek6kpr3qfo642limmlxgn2n7dmr7zspyk.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # x => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where, [2, 2], [2, 2], [0, 0], True), kwargs = {}) triton_poi_fused_avg_pool2d_8 = async_compile.triton('triton_poi_fused_avg_pool2d_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 39200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) % 35 x2 = (xindex // 280) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1) + (1120*x2)), xmask) tmp1 = tl.load(in_ptr0 + (8 + x0 + (16*x1) + (1120*x2)), xmask) tmp3 = tl.load(in_ptr0 + (560 + x0 + (16*x1) + (1120*x2)), xmask) tmp5 = tl.load(in_ptr0 + (568 + x0 + (16*x1) + (1120*x2)), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bs/cbsuymbyfaxcjzcywommyd6psugubyy5qc4eyxzrsgbsazjcovef.py # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten._prelu_kernel] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_1 => gt_1, mul_1, where_1 # Graph fragment: # %convolution_1 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d, %primals_5, %primals_6, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %convolution_1), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {}) triton_poi_fused__prelu_kernel_convolution_9 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_convolution_9(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 69696 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + (x2), tmp2, xmask) tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/jh/cjhqcjlpsnvxz3jf2mwneum6kutby5wfrl4e4qoesum4zui4ytg5.py # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten._prelu_kernel] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # x_2 => gt_2, mul_2, where_2 # Graph fragment: # %convolution_2 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %convolution_2), kwargs = {}) # %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {}) triton_poi_fused__prelu_kernel_convolution_10 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused__prelu_kernel_convolution_10(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 61504 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + (x2), tmp2, xmask) tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/v2/cv2mresyrd2x2rgredwbtycq43oow5v7sxc66prtwsxgwernjvht.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # x_3 => avg_pool2d_1 # Graph fragment: # %avg_pool2d_1 : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_2, [2, 2], [2, 2], [0, 0], True), kwargs = {}) triton_poi_fused_avg_pool2d_11 = async_compile.triton('triton_poi_fused_avg_pool2d_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_11(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = (xindex // 256) % 16 x1 = (xindex // 16) % 16 x0 = xindex % 16 x3 = (xindex // 4096) x6 = xindex tmp0 = 2*x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 31, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = 2*x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (x0 + (32*x1) + (992*x2) + (15376*x3)), tmp10, other=0.0) tmp12 = 1 + (2*x1) tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (16 + x0 + (32*x1) + (992*x2) + (15376*x3)), tmp16, other=0.0) tmp18 = tmp17 + tmp11 tmp19 = 1 + (2*x2) tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + (496 + x0 + (32*x1) + 
(992*x2) + (15376*x3)), tmp23, other=0.0) tmp25 = tmp24 + tmp18 tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + (512 + x0 + (32*x1) + (992*x2) + (15376*x3)), tmp26, other=0.0) tmp28 = tmp27 + tmp25 tmp29 = (((31) * ((31) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (31)))*((31) * ((31) <= (2 + (2*x2))) + (2 + (2*x2)) * ((2 + (2*x2)) < (31)))) + ((-2)*x1*((31) * ((31) <= (2 + (2*x2))) + (2 + (2*x2)) * ((2 + (2*x2)) < (31)))) + ((-2)*x2*((31) * ((31) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (31)))) + (4*x1*x2) tmp30 = tmp28 / tmp29 tl.store(out_ptr0 + (x6), tmp30, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/gz/cgzctsz7ke2zlqdb7qd73n35alvgfp7p2wdb4wrnptbulyc65232.py # Topologically Sorted Source Nodes: [conv2d_3, x_4], Original ATen: [aten.convolution, aten._prelu_kernel] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # x_4 => gt_3, mul_3, where_3 # Graph fragment: # %convolution_3 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d_1, %primals_11, %primals_12, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %convolution_3), kwargs = {}) # %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {}) triton_poi_fused__prelu_kernel_convolution_12 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_convolution_12(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 18816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 24 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 
0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + (x2), tmp2, xmask) tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/i7/ci7zpe45tpg6p2cenph4fdctdx4bhb4my6zxnd3esrnlsxvwc2se.py # Topologically Sorted Source Nodes: [conv2d_4, x_5], Original ATen: [aten.convolution, aten._prelu_kernel] # Source node to ATen node mapping: # conv2d_4 => convolution_4 # x_5 => gt_4, mul_4, where_4 # Graph fragment: # %convolution_4 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%where_3, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_4, %convolution_4), kwargs = {}) # %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_4, %mul_4), kwargs = {}) triton_poi_fused__prelu_kernel_convolution_13 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_convolution_13(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 13824 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 24 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + (x2), tmp2, xmask) tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/q3/cq3sjud5wtwjglf72klly7e7gj5a7ntrgb7m3bqr4ibq2nw4lf3j.py # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # x_6 => avg_pool2d_2 # Graph 
fragment: # %avg_pool2d_2 : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_4, [2, 2], [2, 2], [0, 0], True), kwargs = {}) triton_poi_fused_avg_pool2d_14 = async_compile.triton('triton_poi_fused_avg_pool2d_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_14(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3456 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 24 x1 = (xindex // 24) % 6 x2 = (xindex // 144) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (48*x1) + (576*x2)), xmask) tmp1 = tl.load(in_ptr0 + (24 + x0 + (48*x1) + (576*x2)), xmask) tmp3 = tl.load(in_ptr0 + (288 + x0 + (48*x1) + (576*x2)), xmask) tmp5 = tl.load(in_ptr0 + (312 + x0 + (48*x1) + (576*x2)), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ve/cvejxautwk6vjyzzcijn6dr6fe5l3vz7vqxywawqxgtl3cj2rz3b.py # Topologically Sorted Source Nodes: [conv2d_5, x_7], Original ATen: [aten.convolution, aten._prelu_kernel] # Source node to ATen node mapping: # conv2d_5 => convolution_5 # x_7 => gt_5, mul_5, where_5 # Graph fragment: # %convolution_5 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d_2, %primals_17, %primals_18, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_5, 0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, %convolution_5), kwargs = {}) # %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %convolution_5, %mul_5), kwargs = {}) triton_poi_fused__prelu_kernel_convolution_15 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers 
import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_convolution_15(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 5760 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 40 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + (x2), tmp2, xmask) tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6b/c6b3qihbsjreuoww43r6dvgfxaqqlm2f4efvfol5cfgzbcbwgkt2.py # Topologically Sorted Source Nodes: [conv2d_6, ip3], Original ATen: [aten.convolution, aten._prelu_kernel] # Source node to ATen node mapping: # conv2d_6 => convolution_6 # ip3 => gt_6, mul_6, where_6 # Graph fragment: # %convolution_6 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%where_5, %primals_20, %primals_21, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_6, 0), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_6, %convolution_6), kwargs = {}) # %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %convolution_6, %mul_6), kwargs = {}) triton_poi_fused__prelu_kernel_convolution_16 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_16', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 128], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_convolution_16(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 144 xnumel = 80 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex y2 = yindex % 36 y3 = (yindex // 36) tmp0 = tl.load(in_out_ptr0 + (x1 + (80*y0)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (0)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, YBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.debug_barrier() tl.store(in_out_ptr0 + (x1 + (80*y0)), tmp2, xmask & ymask) tl.store(out_ptr0 + (y2 + (36*x1) + (2880*y3)), tmp8, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/yc/cycfekrppcvpilftfkwsyozjyiqgtdmt7fougluog7fggpundo4s.py # Topologically Sorted Source Nodes: [ip3_2], Original ATen: [aten._prelu_kernel] # Source node to ATen node mapping: # ip3_2 => gt_7, mul_7, where_7 # Graph fragment: # %gt_7 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%addmm, 0), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_8, %addmm), kwargs = {}) # %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_7, %addmm, %mul_7), kwargs = {}) triton_poi_fused__prelu_kernel_17 = async_compile.triton('triton_poi_fused__prelu_kernel_17', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_17(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr1 + (0)) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = tmp4 * tmp0 tmp6 = tl.where(tmp2, tmp0, tmp5) tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30 = args args.clear() assert_size_stride(primals_1, (8, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (8, ), (1, )) assert_size_stride(primals_3, (4, 1, 144, 144), (20736, 20736, 144, 1)) assert_size_stride(primals_4, (1, ), (1, )) assert_size_stride(primals_5, (16, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_6, (16, ), (1, )) assert_size_stride(primals_7, (1, ), (1, )) assert_size_stride(primals_8, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_9, (16, ), (1, )) assert_size_stride(primals_10, (1, ), (1, )) assert_size_stride(primals_11, (24, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_12, (24, ), (1, )) assert_size_stride(primals_13, (1, ), (1, )) assert_size_stride(primals_14, (24, 24, 3, 3), (216, 9, 3, 1)) assert_size_stride(primals_15, (24, ), (1, )) assert_size_stride(primals_16, (1, ), (1, )) assert_size_stride(primals_17, (40, 24, 3, 3), (216, 9, 3, 1)) assert_size_stride(primals_18, (40, ), (1, )) assert_size_stride(primals_19, (1, ), (1, )) assert_size_stride(primals_20, (80, 40, 3, 3), (360, 9, 3, 1)) assert_size_stride(primals_21, (80, ), (1, )) assert_size_stride(primals_22, (1, ), (1, )) assert_size_stride(primals_23, (128, 1280), (1280, 1)) assert_size_stride(primals_24, (128, ), (1, )) assert_size_stride(primals_25, (1, ), (1, )) assert_size_stride(primals_26, (128, 128), (128, 1)) assert_size_stride(primals_27, (128, ), (1, )) assert_size_stride(primals_28, (1, ), (1, )) assert_size_stride(primals_29, (42, 128), (128, 1)) assert_size_stride(primals_30, (42, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 8, 3, 3), (72, 1, 24, 8), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_5, buf0, 128, 9, grid=grid(128, 9), stream=stream0) del primals_5 buf1 = empty_strided_cuda((16, 16, 3, 3), (144, 1, 48, 16), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_8, buf1, 256, 9, grid=grid(256, 9), stream=stream0) del primals_8 buf2 = empty_strided_cuda((24, 16, 3, 3), (144, 1, 48, 16), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] 
triton_poi_fused_2.run(primals_11, buf2, 384, 9, grid=grid(384, 9), stream=stream0) del primals_11 buf3 = empty_strided_cuda((24, 24, 3, 3), (216, 1, 72, 24), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_14, buf3, 576, 9, grid=grid(576, 9), stream=stream0) del primals_14 buf4 = empty_strided_cuda((40, 24, 3, 3), (216, 1, 72, 24), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_17, buf4, 960, 9, grid=grid(960, 9), stream=stream0) del primals_17 buf5 = empty_strided_cuda((80, 40, 3, 3), (360, 1, 120, 40), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_5.run(primals_20, buf5, 3200, 9, grid=grid(3200, 9), stream=stream0) del primals_20 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 8, 70, 70), (39200, 4900, 70, 1)) buf7 = empty_strided_cuda((4, 8, 70, 70), (39200, 1, 560, 8), torch.float32) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] triton_poi_fused_convolution_6.run(buf6, primals_2, buf7, 32, 4900, grid=grid(32, 4900), stream=stream0) del primals_2 buf8 = reinterpret_tensor(buf6, (4, 8, 70, 70), (39200, 1, 560, 8), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [prelu], Original ATen: [aten._prelu_kernel] triton_poi_fused__prelu_kernel_7.run(buf7, primals_4, buf8, 156800, grid=grid(156800), stream=stream0) buf9 = empty_strided_cuda((4, 8, 35, 35), (9800, 1, 280, 8), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.avg_pool2d] triton_poi_fused_avg_pool2d_8.run(buf8, buf9, 39200, grid=grid(39200), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 16, 33, 33), (17424, 1, 528, 16)) buf11 = buf10; del buf10 # reuse buf12 = empty_strided_cuda((4, 16, 33, 33), (17424, 1, 528, 16), torch.float32) # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_9.run(buf11, primals_6, primals_7, buf12, 69696, grid=grid(69696), stream=stream0) del primals_6 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf13 = extern_kernels.convolution(buf12, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 16, 31, 31), (15376, 1, 496, 16)) buf14 = buf13; del buf13 # reuse buf15 = empty_strided_cuda((4, 16, 31, 31), (15376, 1, 496, 16), torch.float32) # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_10.run(buf14, primals_9, primals_10, buf15, 61504, grid=grid(61504), stream=stream0) del primals_9 buf16 = empty_strided_cuda((4, 16, 16, 16), (4096, 1, 256, 16), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.avg_pool2d] triton_poi_fused_avg_pool2d_11.run(buf15, buf16, 16384, grid=grid(16384), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf17 = 
extern_kernels.convolution(buf16, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 24, 14, 14), (4704, 1, 336, 24)) buf18 = buf17; del buf17 # reuse buf19 = empty_strided_cuda((4, 24, 14, 14), (4704, 1, 336, 24), torch.float32) # Topologically Sorted Source Nodes: [conv2d_3, x_4], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_12.run(buf18, primals_12, primals_13, buf19, 18816, grid=grid(18816), stream=stream0) del primals_12 # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf20 = extern_kernels.convolution(buf19, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 24, 12, 12), (3456, 1, 288, 24)) buf21 = buf20; del buf20 # reuse buf22 = empty_strided_cuda((4, 24, 12, 12), (3456, 1, 288, 24), torch.float32) # Topologically Sorted Source Nodes: [conv2d_4, x_5], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_13.run(buf21, primals_15, primals_16, buf22, 13824, grid=grid(13824), stream=stream0) del primals_15 buf23 = empty_strided_cuda((4, 24, 6, 6), (864, 1, 144, 24), torch.float32) # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.avg_pool2d] triton_poi_fused_avg_pool2d_14.run(buf22, buf23, 3456, grid=grid(3456), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf23, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 40, 6, 6), (1440, 1, 240, 40)) buf25 = buf24; del buf24 # reuse buf26 = empty_strided_cuda((4, 40, 6, 6), (1440, 1, 240, 40), torch.float32) # Topologically Sorted Source Nodes: [conv2d_5, x_7], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_15.run(buf25, primals_18, primals_19, buf26, 5760, grid=grid(5760), stream=stream0) del primals_18 # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf27 = extern_kernels.convolution(buf26, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 80, 6, 6), (2880, 1, 480, 80)) buf28 = buf27; del buf27 # reuse buf29 = empty_strided_cuda((4, 80, 6, 6), (2880, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_6, ip3], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_16.run(buf28, primals_21, primals_22, buf29, 144, 80, grid=grid(144, 80), stream=stream0) del primals_21 buf30 = empty_strided_cuda((9, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_24, reinterpret_tensor(buf29, (9, 1280), (1280, 1), 0), reinterpret_tensor(primals_23, (1280, 128), (1, 1280), 0), alpha=1, beta=1, out=buf30) del primals_24 buf31 = empty_strided_cuda((9, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [ip3_2], Original ATen: [aten._prelu_kernel] triton_poi_fused__prelu_kernel_17.run(buf30, primals_25, buf31, 1152, grid=grid(1152), stream=stream0) buf32 = empty_strided_cuda((9, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] 
extern_kernels.addmm(primals_27, buf31, reinterpret_tensor(primals_26, (128, 128), (1, 128), 0), alpha=1, beta=1, out=buf32) del primals_27 buf33 = empty_strided_cuda((9, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [ip3_3], Original ATen: [aten._prelu_kernel] triton_poi_fused__prelu_kernel_17.run(buf32, primals_28, buf33, 1152, grid=grid(1152), stream=stream0) buf34 = empty_strided_cuda((9, 42), (42, 1), torch.float32) # Topologically Sorted Source Nodes: [ip3_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_30, buf33, reinterpret_tensor(primals_29, (128, 42), (1, 128), 0), alpha=1, beta=1, out=buf34) del primals_30 return (buf34, primals_1, primals_3, primals_4, buf0, primals_7, buf1, primals_10, buf2, primals_13, buf3, primals_16, buf4, primals_19, buf5, primals_22, primals_25, primals_28, buf7, buf8, buf9, buf11, buf12, buf14, buf15, buf16, buf18, buf19, buf21, buf22, buf23, buf25, buf26, buf28, reinterpret_tensor(buf29, (9, 1280), (1280, 1), 0), buf30, buf31, buf32, buf33, primals_29, primals_26, primals_23, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((8, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 144, 144), (20736, 20736, 144, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((24, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((24, 24, 3, 3), (216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((40, 24, 3, 3), (216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((40, ), (1, ), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((80, 40, 3, 3), (360, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((80, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((128, 1280), (1280, 1), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((128, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((1, ), (1, ), device='cuda:0', 
dtype=torch.float32) primals_29 = rand_strided((42, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((42, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
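A minimal parity sketch (an addition, not part of the generated output): it assumes a CUDA device, the compiled `call` graph above, and the eager `Net` module that follows, and checks that the compiled graph reproduces the eager forward numerically. The primals ordering mirrors the `assert_size_stride` guards in `call`.

import torch

def check_parity(net, atol=1e-4, rtol=1e-4):
    # Assumption: `net` is the eager Net defined below and `call` is the
    # compiled graph above; both run on cuda:0 with the guarded input shape.
    net = net.cuda().eval()
    x = torch.rand(4, 1, 144, 144, device='cuda')
    with torch.no_grad():
        ref = net(x)
    primals = [
        net.conv1_1.weight, net.conv1_1.bias, x, net.prelu1_1.weight,
        net.conv2_1.weight, net.conv2_1.bias, net.prelu2_1.weight,
        net.conv2_2.weight, net.conv2_2.bias, net.prelu2_2.weight,
        net.conv3_1.weight, net.conv3_1.bias, net.prelu3_1.weight,
        net.conv3_2.weight, net.conv3_2.bias, net.prelu3_2.weight,
        net.conv4_1.weight, net.conv4_1.bias, net.prelu4_1.weight,
        net.conv4_2.weight, net.conv4_2.bias, net.prelu4_2.weight,
        net.ip1.weight, net.ip1.bias, net.preluip1.weight,
        net.ip2.weight, net.ip2.bias, net.preluip2.weight,
        net.ip3.weight, net.ip3.bias,
    ]
    out = call([p.detach() for p in primals])[0]  # call() clears its arg list
    assert torch.allclose(ref, out, atol=atol, rtol=rtol)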
import torch import torch.nn as nn class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1_1 = nn.Conv2d(1, 8, 5, 2, 0) self.conv2_1 = nn.Conv2d(8, 16, 3, 1, 0) self.conv2_2 = nn.Conv2d(16, 16, 3, 1, 0) self.conv3_1 = nn.Conv2d(16, 24, 3, 1, 0) self.conv3_2 = nn.Conv2d(24, 24, 3, 1, 0) self.conv4_1 = nn.Conv2d(24, 40, 3, 1, 1) self.conv4_2 = nn.Conv2d(40, 80, 3, 1, 1) self.ip1 = nn.Linear(4 * 4 * 80, 128) self.ip2 = nn.Linear(128, 128) self.ip3 = nn.Linear(128, 42) self.prelu1_1 = nn.PReLU() self.prelu2_1 = nn.PReLU() self.prelu2_2 = nn.PReLU() self.prelu3_1 = nn.PReLU() self.prelu3_2 = nn.PReLU() self.prelu4_1 = nn.PReLU() self.prelu4_2 = nn.PReLU() self.preluip1 = nn.PReLU() self.preluip2 = nn.PReLU() self.ave_pool = nn.AvgPool2d(2, 2, ceil_mode=True) def forward(self, x): x = self.ave_pool(self.prelu1_1(self.conv1_1(x))) x = self.prelu2_1(self.conv2_1(x)) x = self.prelu2_2(self.conv2_2(x)) x = self.ave_pool(x) x = self.prelu3_1(self.conv3_1(x)) x = self.prelu3_2(self.conv3_2(x)) x = self.ave_pool(x) x = self.prelu4_1(self.conv4_1(x)) ip3 = self.prelu4_2(self.conv4_2(x)) ip3 = ip3.view(-1, 4 * 4 * 80) ip3 = self.preluip1(self.ip1(ip3)) ip3 = self.preluip2(self.ip2(ip3)) ip3 = self.ip3(ip3) return ip3 def get_inputs(): return [torch.rand([4, 1, 144, 144])] def get_init_inputs(): return [[], {}]
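A hedged shape trace for the eager forward above (an annotation added here, assuming the 144x144 input from get_inputs()): it explains why the compiled buffers above and below are sized (9, 128) and (9, 42) even though the batch is 4.

# conv1_1 (k=5, s=2): 144 -> 70        ave_pool (ceil): 70 -> 35
# conv2_1 (k=3):      35 -> 33         conv2_2: 33 -> 31    ave_pool: 31 -> 16
# conv3_1:            16 -> 14         conv3_2: 14 -> 12    ave_pool: 12 -> 6
# conv4_1 (k=3, p=1):  6 -> 6          conv4_2 (k=3, p=1):   6 -> 6
# Each sample thus flattens to 80 * 6 * 6 = 2880 features, but ip1 expects
# 4 * 4 * 80 = 1280, so view(-1, 1280) folds the 4-sample batch into 9 rows.
def _trace_shapes():
    import torch
    net = Net()
    out = net(torch.rand(4, 1, 144, 144))
    assert out.shape == (4 * 2880 // 1280, 42)  # (9, 42), not (4, 42)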
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 128 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 8 y1 = yindex // 8 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 8 * x2 + 72 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 256 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 16 y1 = yindex // 16 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 16 * x2 + 144 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 384 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 16 y1 = yindex // 16 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 16 * x2 + 144 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 576 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 24 y1 = yindex // 24 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 24 * x2 + 216 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 960 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 24 y1 = yindex // 24 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 24 * x2 + 216 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 3200 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 40 y1 = yindex // 40 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 40 * x2 + 360 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_6(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 32 xnumel = 4900 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 8 y1 = yindex // 8 tmp0 = tl.load(in_ptr0 + (x2 + 4900 * y3), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (y0 + 8 * x2 + 39200 * y1), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__prelu_kernel_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 156800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = tmp4 * tmp0 tmp6 = tl.where(tmp2, tmp0, tmp5) tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_avg_pool2d_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 39200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 % 35 x2 = xindex // 280 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1 + 1120 * x2), xmask) tmp1 = tl.load(in_ptr0 + (8 + x0 + 16 * x1 + 1120 * x2), xmask) tmp3 = tl.load(in_ptr0 + (560 + x0 + 16 * x1 + 1120 * x2), xmask) tmp5 = tl.load(in_ptr0 + (568 + x0 + 16 * x1 + 1120 * x2), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_9(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 69696 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_10(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 61504 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp8, 
xmask) @triton.jit def triton_poi_fused_avg_pool2d_11(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 256 % 16 x1 = xindex // 16 % 16 x0 = xindex % 16 x3 = xindex // 4096 x6 = xindex tmp0 = 2 * x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 31, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = 2 * x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (x0 + 32 * x1 + 992 * x2 + 15376 * x3), tmp10, other=0.0) tmp12 = 1 + 2 * x1 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (16 + x0 + 32 * x1 + 992 * x2 + 15376 * x3), tmp16, other=0.0) tmp18 = tmp17 + tmp11 tmp19 = 1 + 2 * x2 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + (496 + x0 + 32 * x1 + 992 * x2 + 15376 * x3), tmp23, other=0.0) tmp25 = tmp24 + tmp18 tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + (512 + x0 + 32 * x1 + 992 * x2 + 15376 * x3), tmp26, other=0.0) tmp28 = tmp27 + tmp25 tmp29 = (31 * (31 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 31)) * ( 31 * (31 <= 2 + 2 * x2) + (2 + 2 * x2) * (2 + 2 * x2 < 31) ) + -2 * x1 * (31 * (31 <= 2 + 2 * x2) + (2 + 2 * x2) * (2 + 2 * x2 < 31)) + -2 * x2 * (31 * (31 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 31)) + 4 * x1 * x2 tmp30 = tmp28 / tmp29 tl.store(out_ptr0 + x6, tmp30, None) @triton.jit def triton_poi_fused__prelu_kernel_convolution_12(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 18816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 24 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_13(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 13824 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 24 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_avg_pool2d_14(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 3456 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 24 x1 = xindex // 24 % 6 x2 = xindex // 144 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 48 * x1 + 576 * x2), xmask) tmp1 = tl.load(in_ptr0 + (24 + x0 + 48 * x1 + 576 * x2), xmask) tmp3 = tl.load(in_ptr0 + (288 + x0 + 48 * x1 + 576 * x2), xmask) tmp5 = tl.load(in_ptr0 + (312 + x0 + 48 * x1 + 576 * x2), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_15(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 5760 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 40 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_16(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. constexpr): ynumel = 144 xnumel = 80 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex y2 = yindex % 36 y3 = yindex // 36 tmp0 = tl.load(in_out_ptr0 + (x1 + 80 * y0), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, YBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.debug_barrier() tl.store(in_out_ptr0 + (x1 + 80 * y0), tmp2, xmask & ymask) tl.store(out_ptr0 + (y2 + 36 * x1 + 2880 * y3), tmp8, xmask & ymask) @triton.jit def triton_poi_fused__prelu_kernel_17(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = tmp4 * tmp0 tmp6 = tl.where(tmp2, tmp0, tmp5) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30) = args args.clear() assert_size_stride(primals_1, (8, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 1, 144, 144), (20736, 20736, 144, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (16, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_6, (16,), (1,)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_9, (16,), (1,)) assert_size_stride(primals_10, (1,), (1,)) assert_size_stride(primals_11, (24, 16, 
3, 3), (144, 9, 3, 1)) assert_size_stride(primals_12, (24,), (1,)) assert_size_stride(primals_13, (1,), (1,)) assert_size_stride(primals_14, (24, 24, 3, 3), (216, 9, 3, 1)) assert_size_stride(primals_15, (24,), (1,)) assert_size_stride(primals_16, (1,), (1,)) assert_size_stride(primals_17, (40, 24, 3, 3), (216, 9, 3, 1)) assert_size_stride(primals_18, (40,), (1,)) assert_size_stride(primals_19, (1,), (1,)) assert_size_stride(primals_20, (80, 40, 3, 3), (360, 9, 3, 1)) assert_size_stride(primals_21, (80,), (1,)) assert_size_stride(primals_22, (1,), (1,)) assert_size_stride(primals_23, (128, 1280), (1280, 1)) assert_size_stride(primals_24, (128,), (1,)) assert_size_stride(primals_25, (1,), (1,)) assert_size_stride(primals_26, (128, 128), (128, 1)) assert_size_stride(primals_27, (128,), (1,)) assert_size_stride(primals_28, (1,), (1,)) assert_size_stride(primals_29, (42, 128), (128, 1)) assert_size_stride(primals_30, (42,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 8, 3, 3), (72, 1, 24, 8), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(128, 9)](primals_5, buf0, 128, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_5 buf1 = empty_strided_cuda((16, 16, 3, 3), (144, 1, 48, 16), torch. float32) triton_poi_fused_1[grid(256, 9)](primals_8, buf1, 256, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf2 = empty_strided_cuda((24, 16, 3, 3), (144, 1, 48, 16), torch. float32) triton_poi_fused_2[grid(384, 9)](primals_11, buf2, 384, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_11 buf3 = empty_strided_cuda((24, 24, 3, 3), (216, 1, 72, 24), torch. float32) triton_poi_fused_3[grid(576, 9)](primals_14, buf3, 576, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf4 = empty_strided_cuda((40, 24, 3, 3), (216, 1, 72, 24), torch. float32) triton_poi_fused_4[grid(960, 9)](primals_17, buf4, 960, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_17 buf5 = empty_strided_cuda((80, 40, 3, 3), (360, 1, 120, 40), torch. float32) triton_poi_fused_5[grid(3200, 9)](primals_20, buf5, 3200, 9, XBLOCK =16, YBLOCK=64, num_warps=4, num_stages=1) del primals_20 buf6 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 8, 70, 70), (39200, 4900, 70, 1)) buf7 = empty_strided_cuda((4, 8, 70, 70), (39200, 1, 560, 8), torch .float32) triton_poi_fused_convolution_6[grid(32, 4900)](buf6, primals_2, buf7, 32, 4900, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_2 buf8 = reinterpret_tensor(buf6, (4, 8, 70, 70), (39200, 1, 560, 8), 0) del buf6 triton_poi_fused__prelu_kernel_7[grid(156800)](buf7, primals_4, buf8, 156800, XBLOCK=512, num_warps=8, num_stages=1) buf9 = empty_strided_cuda((4, 8, 35, 35), (9800, 1, 280, 8), torch. 
float32) triton_poi_fused_avg_pool2d_8[grid(39200)](buf8, buf9, 39200, XBLOCK=512, num_warps=4, num_stages=1) buf10 = extern_kernels.convolution(buf9, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 16, 33, 33), (17424, 1, 528, 16)) buf11 = buf10 del buf10 buf12 = empty_strided_cuda((4, 16, 33, 33), (17424, 1, 528, 16), torch.float32) triton_poi_fused__prelu_kernel_convolution_9[grid(69696)](buf11, primals_6, primals_7, buf12, 69696, XBLOCK=1024, num_warps=4, num_stages=1) del primals_6 buf13 = extern_kernels.convolution(buf12, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 16, 31, 31), (15376, 1, 496, 16)) buf14 = buf13 del buf13 buf15 = empty_strided_cuda((4, 16, 31, 31), (15376, 1, 496, 16), torch.float32) triton_poi_fused__prelu_kernel_convolution_10[grid(61504)](buf14, primals_9, primals_10, buf15, 61504, XBLOCK=512, num_warps=4, num_stages=1) del primals_9 buf16 = empty_strided_cuda((4, 16, 16, 16), (4096, 1, 256, 16), torch.float32) triton_poi_fused_avg_pool2d_11[grid(16384)](buf15, buf16, 16384, XBLOCK=256, num_warps=4, num_stages=1) buf17 = extern_kernels.convolution(buf16, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 24, 14, 14), (4704, 1, 336, 24)) buf18 = buf17 del buf17 buf19 = empty_strided_cuda((4, 24, 14, 14), (4704, 1, 336, 24), torch.float32) triton_poi_fused__prelu_kernel_convolution_12[grid(18816)](buf18, primals_12, primals_13, buf19, 18816, XBLOCK=256, num_warps=4, num_stages=1) del primals_12 buf20 = extern_kernels.convolution(buf19, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 24, 12, 12), (3456, 1, 288, 24)) buf21 = buf20 del buf20 buf22 = empty_strided_cuda((4, 24, 12, 12), (3456, 1, 288, 24), torch.float32) triton_poi_fused__prelu_kernel_convolution_13[grid(13824)](buf21, primals_15, primals_16, buf22, 13824, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf23 = empty_strided_cuda((4, 24, 6, 6), (864, 1, 144, 24), torch. float32) triton_poi_fused_avg_pool2d_14[grid(3456)](buf22, buf23, 3456, XBLOCK=256, num_warps=4, num_stages=1) buf24 = extern_kernels.convolution(buf23, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 40, 6, 6), (1440, 1, 240, 40)) buf25 = buf24 del buf24 buf26 = empty_strided_cuda((4, 40, 6, 6), (1440, 1, 240, 40), torch .float32) triton_poi_fused__prelu_kernel_convolution_15[grid(5760)](buf25, primals_18, primals_19, buf26, 5760, XBLOCK=256, num_warps=4, num_stages=1) del primals_18 buf27 = extern_kernels.convolution(buf26, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 80, 6, 6), (2880, 1, 480, 80)) buf28 = buf27 del buf27 buf29 = empty_strided_cuda((4, 80, 6, 6), (2880, 36, 6, 1), torch. 
float32) triton_poi_fused__prelu_kernel_convolution_16[grid(144, 80)](buf28, primals_21, primals_22, buf29, 144, 80, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_21 buf30 = empty_strided_cuda((9, 128), (128, 1), torch.float32) extern_kernels.addmm(primals_24, reinterpret_tensor(buf29, (9, 1280 ), (1280, 1), 0), reinterpret_tensor(primals_23, (1280, 128), ( 1, 1280), 0), alpha=1, beta=1, out=buf30) del primals_24 buf31 = empty_strided_cuda((9, 128), (128, 1), torch.float32) triton_poi_fused__prelu_kernel_17[grid(1152)](buf30, primals_25, buf31, 1152, XBLOCK=256, num_warps=4, num_stages=1) buf32 = empty_strided_cuda((9, 128), (128, 1), torch.float32) extern_kernels.addmm(primals_27, buf31, reinterpret_tensor( primals_26, (128, 128), (1, 128), 0), alpha=1, beta=1, out=buf32) del primals_27 buf33 = empty_strided_cuda((9, 128), (128, 1), torch.float32) triton_poi_fused__prelu_kernel_17[grid(1152)](buf32, primals_28, buf33, 1152, XBLOCK=256, num_warps=4, num_stages=1) buf34 = empty_strided_cuda((9, 42), (42, 1), torch.float32) extern_kernels.addmm(primals_30, buf33, reinterpret_tensor( primals_29, (128, 42), (1, 128), 0), alpha=1, beta=1, out=buf34) del primals_30 return (buf34, primals_1, primals_3, primals_4, buf0, primals_7, buf1, primals_10, buf2, primals_13, buf3, primals_16, buf4, primals_19, buf5, primals_22, primals_25, primals_28, buf7, buf8, buf9, buf11, buf12, buf14, buf15, buf16, buf18, buf19, buf21, buf22, buf23, buf25, buf26, buf28, reinterpret_tensor(buf29, (9, 1280), (1280, 1), 0), buf30, buf31, buf32, buf33, primals_29, primals_26, primals_23) class NetNew(nn.Module): def __init__(self): super(NetNew, self).__init__() self.conv1_1 = nn.Conv2d(1, 8, 5, 2, 0) self.conv2_1 = nn.Conv2d(8, 16, 3, 1, 0) self.conv2_2 = nn.Conv2d(16, 16, 3, 1, 0) self.conv3_1 = nn.Conv2d(16, 24, 3, 1, 0) self.conv3_2 = nn.Conv2d(24, 24, 3, 1, 0) self.conv4_1 = nn.Conv2d(24, 40, 3, 1, 1) self.conv4_2 = nn.Conv2d(40, 80, 3, 1, 1) self.ip1 = nn.Linear(4 * 4 * 80, 128) self.ip2 = nn.Linear(128, 128) self.ip3 = nn.Linear(128, 42) self.prelu1_1 = nn.PReLU() self.prelu2_1 = nn.PReLU() self.prelu2_2 = nn.PReLU() self.prelu3_1 = nn.PReLU() self.prelu3_2 = nn.PReLU() self.prelu4_1 = nn.PReLU() self.prelu4_2 = nn.PReLU() self.preluip1 = nn.PReLU() self.preluip2 = nn.PReLU() self.ave_pool = nn.AvgPool2d(2, 2, ceil_mode=True) def forward(self, input_0): primals_1 = self.conv1_1.weight primals_2 = self.conv1_1.bias primals_5 = self.conv2_1.weight primals_6 = self.conv2_1.bias primals_8 = self.conv2_2.weight primals_9 = self.conv2_2.bias primals_11 = self.conv3_1.weight primals_12 = self.conv3_1.bias primals_14 = self.conv3_2.weight primals_15 = self.conv3_2.bias primals_17 = self.conv4_1.weight primals_18 = self.conv4_1.bias primals_20 = self.conv4_2.weight primals_21 = self.conv4_2.bias primals_23 = self.ip1.weight primals_24 = self.ip1.bias primals_26 = self.ip2.weight primals_27 = self.ip2.bias primals_29 = self.ip3.weight primals_30 = self.ip3.bias primals_4 = self.prelu1_1.weight primals_7 = self.prelu2_1.weight primals_10 = self.prelu2_2.weight primals_13 = self.prelu3_1.weight primals_16 = self.prelu3_2.weight primals_19 = self.prelu4_1.weight primals_22 = self.prelu4_2.weight primals_25 = self.preluip1.weight primals_28 = self.preluip2.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, 
primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30]) return output[0]
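A brief usage sketch for NetNew (added here; it assumes a CUDA device, since every kernel above is compiled for cuda:0, and the single-channel 144x144 input that the size/stride guards in call() assert):

if torch.cuda.is_available():
    model = NetNew().cuda()  # module parameters become the primals fed to call()
    inp = torch.rand(4, 1, 144, 144, device='cuda')
    out = model(inp)  # shape (9, 42); see the view(-1, 1280) note above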
fengjixuchui/EmbeddedSystem
Net
false
15423
[ "MIT" ]
228
ae17e41bb120922a99f2d91818c381e38e868040
https://github.com/fengjixuchui/EmbeddedSystem/tree/ae17e41bb120922a99f2d91818c381e38e868040
Delta
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ac/cactp4raijeuupgkf72atpb5dqx2m7nq5k7fpzjavsrr2lquv6jf.py # Topologically Sorted Source Nodes: [specgram_1], Original ATen: [aten.replication_pad1d] # Source node to ATen node mapping: # specgram_1 => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view, [None, None, %clamp_max]), kwargs = {}) triton_poi_fused_replication_pad1d_0 = async_compile.triton('triton_poi_fused_replication_pad1d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_replication_pad1d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_replication_pad1d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = tl.load(in_ptr0 + ((4*(x1 % 4)) + (16*(x1 // 16)) + (64*((x1 // 4) % 4)) + ((3) * 
((3) <= (((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0))))) + (((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) * ((((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ns/cnsqxcdmqckv2b4hbcx2tgslbei3i2eta37z6mobhzwxtt56do2q.py # Topologically Sorted Source Nodes: [arange, kernel], Original ATen: [aten.arange, aten.repeat] # Source node to ATen node mapping: # arange => add, convert_element_type, iota_1, mul # kernel => repeat # Graph fragment: # %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (5,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_1, 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, -2), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {}) # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%convert_element_type, [64, 1, 1]), kwargs = {}) triton_poi_fused_arange_repeat_1 = async_compile.triton('triton_poi_fused_arange_repeat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_repeat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_arange_repeat_1(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x2 = xindex tmp0 = (-2) + x0 tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + (x2), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7o/c7o6lz3z3wc2hctevxaw5a7vifalwi3bccdwtwzi62tq4djyl2kg.py # Topologically Sorted Source Nodes: [specgram_3], Original ATen: [aten.replication_pad1d] # Source node to ATen node mapping: # specgram_3 => _unsafe_index_1 # Graph fragment: # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_2, [None, None, 
%clamp_max_1]), kwargs = {}) triton_poi_fused_replication_pad1d_2 = async_compile.triton('triton_poi_fused_replication_pad1d_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_replication_pad1d_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_replication_pad1d_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = tl.load(in_ptr0 + ((4*x1) + ((3) * ((3) <= (((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0))))) + (((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) * ((((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tmp1 = 0.1 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/i7/ci7zzwpyjyzmhwbdzexdktbiadv5a4qpgn2zumseiqxejm7orwxo.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %permute_1, %permute_3], -1), kwargs = {}) triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 
'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x4 = (xindex // 12) x1 = (xindex // 12) % 4 x2 = (xindex // 48) % 4 x3 = (xindex // 192) x5 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x4) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + ((4*x1) + (16*x3) + (64*x2) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = 0.1 tmp12 = tmp10 * tmp11 tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp9, tmp12, tmp13) tmp15 = tmp0 >= tmp7 tmp16 = tl.full([1], 12, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tl.load(in_ptr2 + ((4*x1) + (16*x3) + (64*x2) + ((-8) + x0)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tmp18 * tmp11 tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp15, tmp19, tmp20) tmp22 = tl.where(tmp9, tmp14, tmp21) tmp23 = tl.where(tmp4, tmp5, tmp22) tl.store(out_ptr0 + (x5), tmp23, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 64, 8), (512, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [specgram_1], Original ATen: [aten.replication_pad1d] stream0 = get_raw_stream(0) triton_poi_fused_replication_pad1d_0.run(arg0_1, buf0, 512, grid=grid(512), stream=stream0) buf1 = empty_strided_cuda((64, 1, 5), (5, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [arange, kernel], Original ATen: [aten.arange, aten.repeat] triton_poi_fused_arange_repeat_1.run(buf1, 320, grid=grid(320), stream=stream0) # Topologically Sorted Source Nodes: [specgram_1, arange, kernel, conv1d], Original ATen: [aten.replication_pad1d, aten.arange, aten.repeat, aten.convolution] buf2 = extern_kernels.convolution(buf0, buf1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=64, bias=None) assert_size_stride(buf2, (1, 64, 4), (256, 4, 1)) buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [specgram_3], Original ATen: [aten.replication_pad1d] triton_poi_fused_replication_pad1d_2.run(buf2, buf3, 512, grid=grid(512), stream=stream0) buf4 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [arange_1, kernel_1], Original ATen: [aten.arange, aten.repeat] triton_poi_fused_arange_repeat_1.run(buf4, 320, grid=grid(320), stream=stream0) # Topologically Sorted Source Nodes: [specgram_3, arange_1, kernel_1, conv1d_1], Original ATen: [aten.replication_pad1d, aten.arange, aten.repeat, aten.convolution] buf5 = extern_kernels.convolution(buf3, buf4, stride=(1,), 
padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=64, bias=None) assert_size_stride(buf5, (1, 64, 4), (256, 4, 1)) del buf3 del buf4 buf6 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] triton_poi_fused_cat_3.run(arg0_1, buf2, buf5, buf6, 768, grid=grid(768), stream=stream0) del arg0_1 del buf2 del buf5 return (buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
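# Editor's sketch (not Inductor output): verifies the 0.1 constant baked into
# triton_poi_fused_replication_pad1d_2 and triton_poi_fused_cat_3 above.
# torchaudio's ComputeDeltas with the default win_length=5 uses taps
# n in [-2, -1, 0, 1, 2] and divides by 2 * sum(n^2) = 10; Inductor folds that
# division into the multiply by 0.1. Only public torch/torchaudio APIs are
# assumed; the shapes are illustrative.
import torch
import torch.nn.functional as F
from torchaudio import transforms

x = torch.randn(1, 3, 16)  # (channel, freq, time) toy spectrogram
delta = transforms.ComputeDeltas(win_length=5)(x)

padded = F.pad(x, (2, 2), mode='replicate')      # same padding as the kernel
taps = torch.arange(-2.0, 3.0).view(1, 1, 5)     # the arange/repeat kernel
manual = F.conv1d(padded.reshape(-1, 1, padded.size(-1)), taps)
print(torch.allclose(delta, manual.reshape_as(x) / 10.0, atol=1e-5))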
import torch import torch.nn as nn from torchaudio import transforms class Delta(nn.Module): def __init__(self, order=2, **kwargs): super(Delta, self).__init__() self.order = order self.compute_delta = transforms.ComputeDeltas(**kwargs) def forward(self, x): feats = [x] for o in range(self.order): feat = feats[-1].transpose(0, 1).unsqueeze(0) delta = self.compute_delta(feat) feats.append(delta.squeeze(0).transpose(0, 1)) x = torch.cat(feats, dim=-1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
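# Editor's usage sketch (not part of the record): the eager Delta module
# above, applied to the shape from get_inputs(); order=2 appends first- and
# second-order deltas along the last dimension.
import torch
feats = Delta(order=2)(torch.rand(4, 4, 4, 4))
print(feats.shape)  # torch.Size([4, 4, 4, 12]): base + delta + delta-delta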
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torchaudio import transforms assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_replication_pad1d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 * (x1 % 4) + 16 * (x1 // 16) + 64 * (x1 // 4 % 4) + (3 * (3 <= 0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) + (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) * (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0) < 3))), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_arange_repeat_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x2 = xindex tmp0 = -2 + x0 tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x2, tmp1, xmask) @triton.jit def triton_poi_fused_replication_pad1d_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 * x1 + (3 * (3 <= 0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) + (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) * (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0) < 3))), xmask, eviction_policy='evict_last') tmp1 = 0.1 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x4 = xindex // 12 x1 = xindex // 12 % 4 x2 = xindex // 48 % 4 x3 = xindex // 192 x5 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x4 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + 16 * x3 + 64 * x2 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = 0.1 tmp12 = tmp10 * tmp11 tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp9, tmp12, tmp13) tmp15 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp18 = tl.load(in_ptr2 + (4 * x1 + 16 * x3 + 64 * x2 + (-8 + x0)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tmp18 * tmp11 tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp15, tmp19, tmp20) tmp22 = tl.where(tmp9, tmp14, tmp21) tmp23 = tl.where(tmp4, tmp5, tmp22) tl.store(out_ptr0 + x5, tmp23, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 64, 8), (512, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_replication_pad1d_0[grid(512)](arg0_1, buf0, 512, XBLOCK=128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((64, 1, 5), (5, 5, 1), torch.float32) 
triton_poi_fused_arange_repeat_1[grid(320)](buf1, 320, XBLOCK=256, num_warps=4, num_stages=1) buf2 = extern_kernels.convolution(buf0, buf1, stride=(1,), padding= (0,), dilation=(1,), transposed=False, output_padding=(0,), groups=64, bias=None) assert_size_stride(buf2, (1, 64, 4), (256, 4, 1)) buf3 = buf0 del buf0 triton_poi_fused_replication_pad1d_2[grid(512)](buf2, buf3, 512, XBLOCK=256, num_warps=4, num_stages=1) buf4 = buf1 del buf1 triton_poi_fused_arange_repeat_1[grid(320)](buf4, 320, XBLOCK=256, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(buf3, buf4, stride=(1,), padding= (0,), dilation=(1,), transposed=False, output_padding=(0,), groups=64, bias=None) assert_size_stride(buf5, (1, 64, 4), (256, 4, 1)) del buf3 del buf4 buf6 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch. float32) triton_poi_fused_cat_3[grid(768)](arg0_1, buf2, buf5, buf6, 768, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del buf2 del buf5 return buf6, class DeltaNew(nn.Module): def __init__(self, order=2, **kwargs): super(DeltaNew, self).__init__() self.order = order self.compute_delta = transforms.ComputeDeltas(**kwargs) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
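# Editor's parity sketch (assumes a CUDA device, since call() pins cuda:0):
# DeltaNew should reproduce the eager Delta module from the block above.
import torch
if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    print(torch.allclose(Delta()(x), DeltaNew()(x), atol=1e-5))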
gcambara/s3prl
Delta
false
15424
[ "MIT" ]
856
33284ebde3a903ed8604d6dae85669d0174ae1d3
https://github.com/gcambara/s3prl/tree/33284ebde3a903ed8604d6dae85669d0174ae1d3
Glu
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/yd/cydoiibutfe3ome7v57n5uwsr4vxzbjyuhhdscab4ea22iuzhboe.py # Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul] # Source node to ATen node mapping: # mul => mul # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) x2 = xindex tmp0 = tl.load(in_ptr0 
+ (x0 + (4*x1)), xmask) tmp1 = tl.load(in_ptr0 + (2 + x0 + (4*x1)), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 2), (128, 32, 8, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_sigmoid_0.run(arg0_1, buf0, 512, grid=grid(512), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
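# Editor's note on the fused kernel above: x0 = xindex % 2 and x1 = xindex // 2
# split the size-4 chunked dim into a value half and a gate half sitting at
# offset +2. Illustrative walk-through of the first few flattened indices:
for x2 in range(8):
    x0, x1 = x2 % 2, x2 // 2
    print(f'out[{x2}] = in[{x0 + 4 * x1}] * sigmoid(in[{2 + x0 + 4 * x1}])')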
import torch import torch.nn as nn class Glu(nn.Module): def __init__(self, dim): super(Glu, self).__init__() self.dim = dim def forward(self, x): x_in, x_gate = x.chunk(2, dim=self.dim) return x_in * x_gate.sigmoid() def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
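# Editor's sketch: this hand-rolled gate matches the built-in
# torch.nn.functional.glu on the same dim (checked numerically below).
import torch
import torch.nn.functional as F
x = torch.rand(4, 4, 4, 4, 4)
print(torch.allclose(Glu(dim=4)(x), F.glu(x, dim=4)))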
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask) tmp1 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 2), (128, 32, 8, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(512)](arg0_1, buf0, 512, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 return buf0, class GluNew(nn.Module): def __init__(self, dim): super(GluNew, self).__init__() self.dim = dim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
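# Editor's parity sketch between the eager Glu above and the Triton-backed
# GluNew (assumes a CUDA device, since call() pins cuda:0):
import torch
if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, 4, device='cuda')
    print(torch.allclose(Glu(dim=4)(x), GluNew(dim=4)(x)))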
gheyret/EfficientConformer
Glu
false
15425
[ "Apache-2.0" ]
101
b28a0aaa3b182f72abaccbeb12df0402adf96097
https://github.com/gheyret/EfficientConformer/tree/b28a0aaa3b182f72abaccbeb12df0402adf96097
VisErrorLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/vi/cviz7umjsgwfyha45vr7icgzyp544epwc7hzi4r2qa332s2kq3l6.py # Topologically Sorted Source Nodes: [amplitude_1, amplitude], Original ATen: [aten.max] # Source node to ATen node mapping: # amplitude => max_1 # amplitude_1 => max_2 # Graph fragment: # %max_2 : [num_users=2] = call_function[target=torch.ops.aten.max.default](args = (%arg1_1,), kwargs = {}) # %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.default](args = (%arg1_1,), kwargs = {}) triton_per_fused_max_0 = async_compile.triton('triton_per_fused_max_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_max_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = 
tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0)) tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp3, None) tl.store(out_ptr1 + (tl.full([1], 0, tl.int32)), tmp3, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/yh/cyhqqjam7gc2ha743sss3nrqwevlunp2xhhk5bdk6ef2fkcseqov.py # Topologically Sorted Source Nodes: [sub_1, diff_1, truediv_4, gt_1, vismap_1, ge_2, pos_ids_1, float_5, mul_4, sum_9, float_6, sum_11, truediv_5, le_1, ge_3, neg_ids_1, float_7, mul_5, sum_13, float_8, sum_15, sub, diff, truediv, gt, vismap, ge, pos_ids, float_1, mul, sum_1, float_2, sum_3, truediv_1, le, ge_1, neg_ids, float_3, mul_1, sum_5, float_4, sum_7], Original ATen: [aten.sub, aten.abs, aten.div, aten.gt, aten.repeat, aten.ge, aten.bitwise_and, aten._to_copy, aten.mul, aten.sum, aten.le] # Source node to ATen node mapping: # diff => abs_1 # diff_1 => abs_2 # float_1 => convert_element_type # float_2 => convert_element_type_1 # float_3 => convert_element_type_2 # float_4 => convert_element_type_3 # float_5 => convert_element_type_4 # float_6 => convert_element_type_5 # float_7 => convert_element_type_6 # float_8 => convert_element_type_7 # ge => ge # ge_1 => ge_1 # ge_2 => ge_2 # ge_3 => ge_3 # gt => gt # gt_1 => gt_1 # le => le # le_1 => le_1 # mul => mul # mul_1 => mul_1 # mul_4 => mul_4 # mul_5 => mul_5 # neg_ids => bitwise_and_1 # neg_ids_1 => bitwise_and_3 # pos_ids => bitwise_and # pos_ids_1 => bitwise_and_2 # sub => sub # sub_1 => sub_1 # sum_1 => sum_1 # sum_11 => sum_11 # sum_13 => sum_13 # sum_15 => sum_15 # sum_3 => sum_3 # sum_5 => sum_5 # sum_7 => sum_7 # sum_9 => sum_9 # truediv => div # truediv_1 => div_1 # truediv_4 => div_4 # truediv_5 => div_5 # vismap => repeat # vismap_1 => repeat_1 # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %view_4), kwargs = {}) # %abs_2 : [num_users=2] = call_function[target=torch.ops.aten.abs.default](args = (%sub_1,), kwargs = {}) # %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%max_2, 10), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%view_3, %div_4), kwargs = {}) # %repeat_1 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%arg2_1, [1, 1, 16]), kwargs = {}) # %ge_2 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%repeat_1, 0), kwargs = {}) # %bitwise_and_2 : [num_users=2] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%gt_1, %ge_2), kwargs = {}) # %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_2, torch.float32), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_2, %convert_element_type_4), kwargs = {}) # %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [2]), kwargs = {}) # %convert_element_type_5 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_2, torch.float32), kwargs = {}) # %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%convert_element_type_5, [2]), kwargs = {}) # %div_5 : [num_users=1] = 
call_function[target=torch.ops.aten.div.Tensor](args = (%max_2, 10), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Tensor](args = (%view_3, %div_5), kwargs = {}) # %ge_3 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%repeat_1, 0), kwargs = {}) # %bitwise_and_3 : [num_users=2] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%le_1, %ge_3), kwargs = {}) # %convert_element_type_6 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_3, torch.float32), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_2, %convert_element_type_6), kwargs = {}) # %sum_13 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_5, [2]), kwargs = {}) # %convert_element_type_7 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_3, torch.float32), kwargs = {}) # %sum_15 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%convert_element_type_7, [2]), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %view_1), kwargs = {}) # %abs_1 : [num_users=2] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%max_1, 10), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%view, %div), kwargs = {}) # %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%arg2_1, [1, 1, 16]), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%repeat, 0), kwargs = {}) # %bitwise_and : [num_users=2] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%gt, %ge), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and, torch.float32), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, %convert_element_type), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2]), kwargs = {}) # %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and, torch.float32), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%convert_element_type_1, [2]), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%max_1, 10), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Tensor](args = (%view, %div_1), kwargs = {}) # %ge_1 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%repeat, 0), kwargs = {}) # %bitwise_and_1 : [num_users=2] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%le, %ge_1), kwargs = {}) # %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_1, torch.float32), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, %convert_element_type_2), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [2]), kwargs = {}) # %convert_element_type_3 : [num_users=1] = 
call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_1, torch.float32), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%convert_element_type_3, [2]), kwargs = {}) triton_per_fused__to_copy_abs_bitwise_and_div_ge_gt_le_mul_repeat_sub_sum_1 = async_compile.triton('triton_per_fused__to_copy_abs_bitwise_and_div_ge_gt_le_mul_repeat_sub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: 'i32', 15: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_abs_bitwise_and_div_ge_gt_le_mul_repeat_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 8, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__to_copy_abs_bitwise_and_div_ge_gt_le_mul_repeat_sub_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0) tmp6 = tl.load(in_ptr2 + (0)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp11 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp37 = tl.load(in_ptr4 + (r1 + (16*x0)), xmask, other=0.0) tmp41 = tl.load(in_ptr5 + (0)) tmp42 = tl.broadcast_to(tmp41, [XBLOCK, RBLOCK]) tmp2 = tl.full([1, 1], 0, tl.int32) tmp3 = triton_helpers.maximum(tmp2, tmp1) tmp4 = tmp0 - tmp3 tmp5 = tl_math.abs(tmp4) tmp8 = 0.1 tmp9 = tmp7 * tmp8 tmp10 = tmp0 > tmp9 tmp12 = 0.0 tmp13 = tmp11 >= tmp12 tmp14 = tmp10 & tmp13 tmp15 = tmp14.to(tl.float32) tmp16 = tmp5 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tmp21 = tmp0 <= tmp9 tmp22 = tmp21 & tmp13 tmp23 = tmp22.to(tl.float32) tmp24 = tmp5 * tmp23 tmp25 = 
tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.where(xmask, tmp25, 0) tmp28 = tl.sum(tmp27, 1)[:, None] tmp29 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp31 = tl.where(xmask, tmp29, 0) tmp32 = tl.sum(tmp31, 1)[:, None] tmp33 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp35 = tl.where(xmask, tmp33, 0) tmp36 = tl.sum(tmp35, 1)[:, None] tmp38 = triton_helpers.maximum(tmp2, tmp37) tmp39 = tmp0 - tmp38 tmp40 = tl_math.abs(tmp39) tmp43 = tmp42 * tmp8 tmp44 = tmp0 > tmp43 tmp45 = tmp44 & tmp13 tmp46 = tmp45.to(tl.float32) tmp47 = tmp40 * tmp46 tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK]) tmp50 = tl.where(xmask, tmp48, 0) tmp51 = tl.sum(tmp50, 1)[:, None] tmp52 = tmp0 <= tmp43 tmp53 = tmp52 & tmp13 tmp54 = tmp53.to(tl.float32) tmp55 = tmp40 * tmp54 tmp56 = tl.broadcast_to(tmp55, [XBLOCK, RBLOCK]) tmp58 = tl.where(xmask, tmp56, 0) tmp59 = tl.sum(tmp58, 1)[:, None] tmp60 = tl.broadcast_to(tmp46, [XBLOCK, RBLOCK]) tmp62 = tl.where(xmask, tmp60, 0) tmp63 = tl.sum(tmp62, 1)[:, None] tmp64 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK]) tmp66 = tl.where(xmask, tmp64, 0) tmp67 = tl.sum(tmp66, 1)[:, None] tl.store(out_ptr0 + (x0), tmp20, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) tl.store(out_ptr2 + (x0), tmp32, xmask) tl.store(out_ptr3 + (x0), tmp36, xmask) tl.store(out_ptr4 + (x0), tmp51, xmask) tl.store(out_ptr5 + (x0), tmp59, xmask) tl.store(out_ptr6 + (x0), tmp63, xmask) tl.store(out_ptr7 + (x0), tmp67, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/la/claqn2rhafwydbl5wcdrecdfhka6yuepajc3ejfzcnz4eb5guzpw.py # Topologically Sorted Source Nodes: [sum_10, sum_12, add_3, pos_loss_1, mul_6, sum_14, sum_16, add_4, neg_loss_1, mul_7, total_loss_1], Original ATen: [aten.sum, aten.add, aten.div, aten.mul] # Source node to ATen node mapping: # add_3 => add_3 # add_4 => add_4 # mul_6 => mul_6 # mul_7 => mul_7 # neg_loss_1 => div_7 # pos_loss_1 => div_6 # sum_10 => sum_10 # sum_12 => sum_12 # sum_14 => sum_14 # sum_16 => sum_16 # total_loss_1 => add_5 # Graph fragment: # %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_9, [0]), kwargs = {}) # %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_11, [0]), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_12, 0.0001), kwargs = {}) # %div_6 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_10, %add_3), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_6, 0.5), kwargs = {}) # %sum_14 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_13, [0]), kwargs = {}) # %sum_16 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_15, [0]), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_16, 0.0001), kwargs = {}) # %div_7 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_14, %add_4), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_7, 0.5), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %mul_7), kwargs = {}) triton_poi_fused_add_div_mul_sum_2 = async_compile.triton('triton_poi_fused_add_div_mul_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0), xmask) tmp7 = tl.load(in_ptr1 + (x0), xmask) tmp8 = tl.load(in_ptr1 + (4 + x0), xmask) tmp10 = tl.load(in_ptr1 + (8 + x0), xmask) tmp12 = tl.load(in_ptr1 + (12 + x0), xmask) tmp19 = tl.load(in_ptr2 + (x0), xmask) tmp20 = tl.load(in_ptr2 + (4 + x0), xmask) tmp22 = tl.load(in_ptr2 + (8 + x0), xmask) tmp24 = tl.load(in_ptr2 + (12 + x0), xmask) tmp26 = tl.load(in_ptr3 + (x0), xmask) tmp27 = tl.load(in_ptr3 + (4 + x0), xmask) tmp29 = tl.load(in_ptr3 + (8 + x0), xmask) tmp31 = tl.load(in_ptr3 + (12 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp9 = tmp7 + tmp8 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp14 = 0.0001 tmp15 = tmp13 + tmp14 tmp16 = tmp6 / tmp15 tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp21 = tmp19 + tmp20 tmp23 = tmp21 + tmp22 tmp25 = tmp23 + tmp24 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp32 + tmp14 tmp34 = tmp25 / tmp33 tmp35 = tmp34 * tmp17 tmp36 = tmp18 + tmp35 tl.store(out_ptr0 + (x0), tmp36, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/zk/czkt5zjkqewwauep7wrsfmtdtevalevm3dc7qo3b6pzlv6bbxwje.py # Topologically Sorted Source Nodes: [loss2], Original ATen: [aten.mean] # Source node to ATen node mapping: # loss2 => mean_1 # Graph fragment: # %mean_1 : [num_users=2] = call_function[target=torch.ops.aten.mean.default](args = (%getitem,), kwargs = {}) triton_per_fused_mean_3 = async_compile.triton('triton_per_fused_mean_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 2], 
reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 2 RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp3, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ey/ceyujj5cgkchalnqhwviwmpf7a6gjspg5axzyukvabotlr3tjxcy.py # Topologically Sorted Source Nodes: [sum_2, sum_4, add, pos_loss, mul_2, sum_6, sum_8, add_1, neg_loss, mul_3, total_loss, loss1, loss2, add_6], Original ATen: [aten.sum, aten.add, aten.div, aten.mul, aten.mean] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_6 => add_6 # loss1 => mean # loss2 => mean_1 # mul_2 => mul_2 # mul_3 => mul_3 # neg_loss => div_3 # pos_loss => div_2 # sum_2 => sum_2 # sum_4 => sum_4 # sum_6 => sum_6 # sum_8 => sum_8 # total_loss => add_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_1, [0]), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_3, [0]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_4, 0.0001), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, %add), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, 0.5), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_5, [0]), kwargs = {}) # %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_7, [0]), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_8, 0.0001), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_6, %add_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_3, 0.5), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.default](args = (%add_2,), kwargs = 
{}) # %mean_1 : [num_users=2] = call_function[target=torch.ops.aten.mean.default](args = (%getitem,), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mean_1), kwargs = {}) triton_per_fused_add_div_mean_mul_sum_4 = async_compile.triton('triton_per_fused_add_div_mean_mul_sum_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {7: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=(7,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_sum_4', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_mul_sum_4(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr0 + (4 + r0), None) tmp3 = tl.load(in_ptr0 + (8 + r0), None) tmp5 = tl.load(in_ptr0 + (12 + r0), None) tmp7 = tl.load(in_ptr1 + (r0), None) tmp8 = tl.load(in_ptr1 + (4 + r0), None) tmp10 = tl.load(in_ptr1 + (8 + r0), None) tmp12 = tl.load(in_ptr1 + (12 + r0), None) tmp19 = tl.load(in_ptr2 + (r0), None) tmp20 = tl.load(in_ptr2 + (4 + r0), None) tmp22 = tl.load(in_ptr2 + (8 + r0), None) tmp24 = tl.load(in_ptr2 + (12 + r0), None) tmp26 = tl.load(in_ptr3 + (r0), None) tmp27 = tl.load(in_ptr3 + (4 + r0), None) tmp29 = tl.load(in_ptr3 + (8 + r0), None) tmp31 = tl.load(in_ptr3 + (12 + r0), None) tmp42 = tl.load(in_out_ptr1 + (0)) tmp43 = tl.broadcast_to(tmp42, [XBLOCK, 1]) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp9 = tmp7 + tmp8 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp14 = 0.0001 tmp15 = tmp13 + tmp14 tmp16 = tmp6 / tmp15 tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp21 = tmp19 + tmp20 tmp23 = tmp21 + tmp22 tmp25 = tmp23 + tmp24 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp32 + tmp14 tmp34 = tmp25 / tmp33 tmp35 = tmp34 * tmp17 tmp36 = tmp18 + tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = 
tl.sum(tmp37, 1)[:, None] tmp40 = 4.0 tmp41 = tmp39 / tmp40 tmp44 = 2.0 tmp45 = tmp43 / tmp44 tmp46 = tmp41 + tmp45 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp41, None) tl.debug_barrier() tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp45, None) tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp46, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf9 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [amplitude_1, amplitude], Original ATen: [aten.max] stream0 = get_raw_stream(0) triton_per_fused_max_0.run(arg1_1, buf0, buf9, 1, 256, grid=grid(1), stream=stream0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub_1, diff_1, truediv_4, gt_1, vismap_1, ge_2, pos_ids_1, float_5, mul_4, sum_9, float_6, sum_11, truediv_5, le_1, ge_3, neg_ids_1, float_7, mul_5, sum_13, float_8, sum_15, sub, diff, truediv, gt, vismap, ge, pos_ids, float_1, mul, sum_1, float_2, sum_3, truediv_1, le, ge_1, neg_ids, float_3, mul_1, sum_5, float_4, sum_7], Original ATen: [aten.sub, aten.abs, aten.div, aten.gt, aten.repeat, aten.ge, aten.bitwise_and, aten._to_copy, aten.mul, aten.sum, aten.le] triton_per_fused__to_copy_abs_bitwise_and_div_ge_gt_le_mul_repeat_sub_sum_1.run(arg1_1, arg3_1, buf0, arg2_1, arg0_1, buf9, buf1, buf3, buf2, buf4, buf10, buf12, buf11, buf13, 16, 16, grid=grid(16), stream=stream0) del arg0_1 del arg1_1 del arg2_1 del arg3_1 buf5 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [sum_10, sum_12, add_3, pos_loss_1, mul_6, sum_14, sum_16, add_4, neg_loss_1, mul_7, total_loss_1], Original ATen: [aten.sum, aten.add, aten.div, aten.mul] triton_poi_fused_add_div_mul_sum_2.run(buf1, buf2, buf3, buf4, buf5, 4, grid=grid(4), stream=stream0) del buf1 del buf2 del buf3 del buf4 # Topologically Sorted Source Nodes: [topk], Original ATen: [aten.topk] buf6 = torch.ops.aten.topk.default(buf5, 2) del buf5 buf7 = buf6[0] del buf6 buf17 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [loss2], Original ATen: [aten.mean] triton_per_fused_mean_3.run(buf7, buf17, 1, 2, grid=grid(1), stream=stream0) del buf7 buf15 = buf0; del buf0 # reuse buf16 = buf15; del buf15 # reuse buf18 = buf17; del buf17 # reuse buf19 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [sum_2, sum_4, add, pos_loss, mul_2, sum_6, sum_8, add_1, neg_loss, mul_3, total_loss, loss1, loss2, add_6], Original ATen: [aten.sum, aten.add, aten.div, aten.mul, aten.mean] triton_per_fused_add_div_mean_mul_sum_4.run(buf16, buf18, buf10, buf11, buf12, buf13, buf19, 1, 4, grid=grid(1), stream=stream0) del buf10 del buf11 del buf12 
del buf13 return (buf19, buf16, buf18, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
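# Editor's note: the OHEM half of loss2 survives compilation as the explicit
# torch.ops.aten.topk.default(buf5, 2) call above, i.e. k = int(4 * 0.5)
# keypoints kept. The same selection in plain torch (toy values assumed):
import torch
per_keypoint = torch.rand(4)            # stands in for total_loss_1 (buf5)
kept, _ = per_keypoint.topk(int(per_keypoint.size(0) * 0.5))
print(kept.mean())                      # mirrors triton_per_fused_mean_3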
import torch import torch.nn.functional as F from torch import nn class VisErrorLoss(nn.Module): def __init__(self): super(VisErrorLoss, self).__init__() def compute_l1_weighted_loss(self, hm_targets, hm_preds, vismap, ohem=1.0): """ :param hm_targets: [batch size, keypoint number, h, w] :param hm_preds: [batch size, keypoint number, h, w] :param vismap: [batch size, keypoint number] :return: """ epsilon = 0.0001 hm_preds = F.relu(hm_preds, False) amplitude = torch.max(hm_targets) b, k, h, w = hm_targets.size() hm_targets = hm_targets.view(b, k, -1) hm_preds = hm_preds.view(b, k, -1) vismap = vismap.view(b, k, 1).repeat(1, 1, h * w) pos_ids = (hm_targets > amplitude / 10) & (vismap >= 0) neg_ids = (hm_targets <= amplitude / 10) & (vismap >= 0) diff = (hm_targets - hm_preds).abs() pos_loss = (diff * pos_ids.float()).sum(2).sum(0) / (pos_ids.float( ).sum(2).sum(0) + epsilon) neg_loss = (diff * neg_ids.float()).sum(2).sum(0) / (neg_ids.float( ).sum(2).sum(0) + epsilon) total_loss = 0.5 * pos_loss + 0.5 * neg_loss if ohem < 1: k = int(total_loss.size(0) * ohem) total_loss, _ = total_loss.topk(k) return total_loss.mean() def compute_l2_loss(self, hm_targets, hm_preds, vismap, ohem=1.0): """ :param hm_targets: [batch size, keypoint number, h, w] :param hm_preds: [batch size, keypoint number, h, w] :param vismap: [batch size, keypoint number] :return: """ epsilon = 0.0001 hm_preds = F.relu(hm_preds, False) b, k, h, w = hm_targets.size() hm_targets = hm_targets.view(b, k, -1) hm_preds = hm_preds.view(b, k, -1) vismap = vismap.view(b, k, 1).repeat(1, 1, h * w) ids = vismap == 1 diff = (hm_targets - hm_preds) ** 2 total_loss = (diff * ids.float()).sum(2).sum(0) / (ids.float().sum( 2).sum(0) + epsilon) if ohem < 1: k = int(total_loss.size(0) * ohem) total_loss, _ = total_loss.topk(k) return total_loss.mean() def forward(self, hm_targets, hm_preds1, hm_preds2, vismap): """ :param hm_targets: [batch size, keypoint number, h, w] :param hm_preds: [batch size, keypoint number, h, w] :param vismap: [batch size, keypoint number] :return: """ loss1 = self.compute_l1_weighted_loss(hm_targets, hm_preds1, vismap) loss2 = self.compute_l1_weighted_loss(hm_targets, hm_preds2, vismap, ohem=0.5) return loss1 + loss2, loss1, loss2 def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 1])] def get_init_inputs(): return [[], {}]
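# Editor's usage sketch with the shapes get_inputs() implies: forward returns
# the combined loss plus both parts; loss2 keeps only the top-50% keypoints.
import torch
crit = VisErrorLoss()
targets, preds1, preds2 = (torch.rand(4, 4, 4, 4) for _ in range(3))
vismap = torch.rand(4, 4, 1)
total, loss1, loss2 = crit(targets, preds1, preds2, vismap)
print(total.item(), loss1.item(), loss2.item())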
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn.functional as F from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_max_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp3, None) tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp3, None) @triton.jit def triton_per_fused__to_copy_abs_bitwise_and_div_ge_gt_le_mul_repeat_sub_sum_1( in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp6 = tl.load(in_ptr2 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp11 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp37 = tl.load(in_ptr4 + (r1 + 16 * x0), xmask, other=0.0) tmp41 = tl.load(in_ptr5 + 0) tmp42 = tl.broadcast_to(tmp41, [XBLOCK, RBLOCK]) tmp2 = tl.full([1, 1], 0, tl.int32) tmp3 = triton_helpers.maximum(tmp2, tmp1) tmp4 = tmp0 - tmp3 tmp5 = tl_math.abs(tmp4) tmp8 = 0.1 tmp9 = tmp7 * tmp8 tmp10 = tmp0 > tmp9 tmp12 = 0.0 tmp13 = tmp11 >= tmp12 tmp14 = tmp10 & tmp13 tmp15 = tmp14.to(tl.float32) tmp16 = tmp5 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tmp21 = tmp0 <= tmp9 tmp22 = tmp21 & tmp13 tmp23 = tmp22.to(tl.float32) tmp24 = tmp5 * tmp23 tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.where(xmask, tmp25, 0) tmp28 = tl.sum(tmp27, 1)[:, None] tmp29 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp31 = tl.where(xmask, tmp29, 0) tmp32 = tl.sum(tmp31, 1)[:, None] tmp33 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp35 = tl.where(xmask, tmp33, 0) tmp36 = tl.sum(tmp35, 1)[:, None] tmp38 = triton_helpers.maximum(tmp2, tmp37) tmp39 = tmp0 - tmp38 tmp40 = tl_math.abs(tmp39) tmp43 = tmp42 * tmp8 tmp44 = tmp0 > tmp43 tmp45 = tmp44 & tmp13 tmp46 = tmp45.to(tl.float32) tmp47 = tmp40 * tmp46 tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK]) tmp50 = tl.where(xmask, tmp48, 0) tmp51 = tl.sum(tmp50, 1)[:, None] tmp52 = tmp0 <= tmp43 tmp53 = tmp52 & tmp13 tmp54 = tmp53.to(tl.float32) tmp55 = tmp40 * tmp54 tmp56 = tl.broadcast_to(tmp55, [XBLOCK, RBLOCK]) tmp58 = tl.where(xmask, tmp56, 0) tmp59 = tl.sum(tmp58, 1)[:, None] tmp60 = tl.broadcast_to(tmp46, [XBLOCK, RBLOCK]) tmp62 = tl.where(xmask, tmp60, 0) tmp63 = tl.sum(tmp62, 1)[:, None] tmp64 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK]) tmp66 = tl.where(xmask, tmp64, 0) tmp67 
= tl.sum(tmp66, 1)[:, None] tl.store(out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) tl.store(out_ptr2 + x0, tmp32, xmask) tl.store(out_ptr3 + x0, tmp36, xmask) tl.store(out_ptr4 + x0, tmp51, xmask) tl.store(out_ptr5 + x0, tmp59, xmask) tl.store(out_ptr6 + x0, tmp63, xmask) tl.store(out_ptr7 + x0, tmp67, xmask) @triton.jit def triton_poi_fused_add_div_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0), xmask) tmp7 = tl.load(in_ptr1 + x0, xmask) tmp8 = tl.load(in_ptr1 + (4 + x0), xmask) tmp10 = tl.load(in_ptr1 + (8 + x0), xmask) tmp12 = tl.load(in_ptr1 + (12 + x0), xmask) tmp19 = tl.load(in_ptr2 + x0, xmask) tmp20 = tl.load(in_ptr2 + (4 + x0), xmask) tmp22 = tl.load(in_ptr2 + (8 + x0), xmask) tmp24 = tl.load(in_ptr2 + (12 + x0), xmask) tmp26 = tl.load(in_ptr3 + x0, xmask) tmp27 = tl.load(in_ptr3 + (4 + x0), xmask) tmp29 = tl.load(in_ptr3 + (8 + x0), xmask) tmp31 = tl.load(in_ptr3 + (12 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp9 = tmp7 + tmp8 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp14 = 0.0001 tmp15 = tmp13 + tmp14 tmp16 = tmp6 / tmp15 tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp21 = tmp19 + tmp20 tmp23 = tmp21 + tmp22 tmp25 = tmp23 + tmp24 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp32 + tmp14 tmp34 = tmp25 / tmp33 tmp35 = tmp34 * tmp17 tmp36 = tmp18 + tmp35 tl.store(out_ptr0 + x0, tmp36, xmask) @triton.jit def triton_per_fused_mean_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. 
constexpr): RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None) @triton.jit def triton_per_fused_add_div_mean_mul_sum_4(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (4 + r0), None) tmp3 = tl.load(in_ptr0 + (8 + r0), None) tmp5 = tl.load(in_ptr0 + (12 + r0), None) tmp7 = tl.load(in_ptr1 + r0, None) tmp8 = tl.load(in_ptr1 + (4 + r0), None) tmp10 = tl.load(in_ptr1 + (8 + r0), None) tmp12 = tl.load(in_ptr1 + (12 + r0), None) tmp19 = tl.load(in_ptr2 + r0, None) tmp20 = tl.load(in_ptr2 + (4 + r0), None) tmp22 = tl.load(in_ptr2 + (8 + r0), None) tmp24 = tl.load(in_ptr2 + (12 + r0), None) tmp26 = tl.load(in_ptr3 + r0, None) tmp27 = tl.load(in_ptr3 + (4 + r0), None) tmp29 = tl.load(in_ptr3 + (8 + r0), None) tmp31 = tl.load(in_ptr3 + (12 + r0), None) tmp42 = tl.load(in_out_ptr1 + 0) tmp43 = tl.broadcast_to(tmp42, [XBLOCK, 1]) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp9 = tmp7 + tmp8 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp14 = 0.0001 tmp15 = tmp13 + tmp14 tmp16 = tmp6 / tmp15 tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp21 = tmp19 + tmp20 tmp23 = tmp21 + tmp22 tmp25 = tmp23 + tmp24 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp32 + tmp14 tmp34 = tmp25 / tmp33 tmp35 = tmp34 * tmp17 tmp36 = tmp18 + tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = tl.sum(tmp37, 1)[:, None] tmp40 = 4.0 tmp41 = tmp39 / tmp40 tmp44 = 2.0 tmp45 = tmp43 / tmp44 tmp46 = tmp41 + tmp45 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp41, None) tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp45, None) tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp46, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf9 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_max_0[grid(1)](arg1_1, buf0, buf9, 1, 256, num_warps=2, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_per_fused__to_copy_abs_bitwise_and_div_ge_gt_le_mul_repeat_sub_sum_1[ grid(16)](arg1_1, arg3_1, buf0, arg2_1, arg0_1, buf9, buf1, buf3, 
buf2, buf4, buf10, buf12, buf11, buf13, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 buf5 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_add_div_mul_sum_2[grid(4)](buf1, buf2, buf3, buf4, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf1 del buf2 del buf3 del buf4 buf6 = torch.ops.aten.topk.default(buf5, 2) del buf5 buf7 = buf6[0] del buf6 buf17 = buf9 del buf9 triton_per_fused_mean_3[grid(1)](buf7, buf17, 1, 2, XBLOCK=1, num_warps=2, num_stages=1) del buf7 buf15 = buf0 del buf0 buf16 = buf15 del buf15 buf18 = buf17 del buf17 buf19 = empty_strided_cuda((), (), torch.float32) triton_per_fused_add_div_mean_mul_sum_4[grid(1)](buf16, buf18, buf10, buf11, buf12, buf13, buf19, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf10 del buf11 del buf12 del buf13 return buf19, buf16, buf18 class VisErrorLossNew(nn.Module): def __init__(self): super(VisErrorLossNew, self).__init__() def compute_l1_weighted_loss(self, hm_targets, hm_preds, vismap, ohem=1.0): """ :param hm_targets: [batch size, keypoint number, h, w] :param hm_preds: [batch size, keypoint number, h, w] :param vismap: [batch size, keypoint number] :return: """ epsilon = 0.0001 hm_preds = F.relu(hm_preds, False) amplitude = torch.max(hm_targets) b, k, h, w = hm_targets.size() hm_targets = hm_targets.view(b, k, -1) hm_preds = hm_preds.view(b, k, -1) vismap = vismap.view(b, k, 1).repeat(1, 1, h * w) pos_ids = (hm_targets > amplitude / 10) & (vismap >= 0) neg_ids = (hm_targets <= amplitude / 10) & (vismap >= 0) diff = (hm_targets - hm_preds).abs() pos_loss = (diff * pos_ids.float()).sum(2).sum(0) / (pos_ids.float( ).sum(2).sum(0) + epsilon) neg_loss = (diff * neg_ids.float()).sum(2).sum(0) / (neg_ids.float( ).sum(2).sum(0) + epsilon) total_loss = 0.5 * pos_loss + 0.5 * neg_loss if ohem < 1: k = int(total_loss.size(0) * ohem) total_loss, _ = total_loss.topk(k) return total_loss.mean() def compute_l2_loss(self, hm_targets, hm_preds, vismap, ohem=1.0): """ :param hm_targets: [batch size, keypoint number, h, w] :param hm_preds: [batch size, keypoint number, h, w] :param vismap: [batch size, keypoint number] :return: """ epsilon = 0.0001 hm_preds = F.relu(hm_preds, False) b, k, h, w = hm_targets.size() hm_targets = hm_targets.view(b, k, -1) hm_preds = hm_preds.view(b, k, -1) vismap = vismap.view(b, k, 1).repeat(1, 1, h * w) ids = vismap == 1 diff = (hm_targets - hm_preds) ** 2 total_loss = (diff * ids.float()).sum(2).sum(0) / (ids.float().sum( 2).sum(0) + epsilon) if ohem < 1: k = int(total_loss.size(0) * ohem) total_loss, _ = total_loss.topk(k) return total_loss.mean() def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg3_1 = input_2 arg2_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0], output[1], output[2]
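# Hedged smoke test (editorial addition, not from the repo): call() pins all
# buffers to cuda:0 and hard-asserts the traced shapes, so this sketch only
# runs on a GPU with [4, 4, 4, 4] heatmaps and a [4, 4, 1] vismap. Note the
# argument binding read off call(): input_1 feeds the max() kernel that
# computes the amplitude, so it is the target heatmap, not a prediction.
if torch.cuda.is_available():
    mod = VisErrorLossNew()
    targets = torch.rand(4, 4, 4, 4, device='cuda')
    preds1 = torch.rand(4, 4, 4, 4, device='cuda')
    preds2 = torch.rand(4, 4, 4, 4, device='cuda')
    vismap = torch.rand(4, 4, 1, device='cuda')
    total, loss1, loss2 = mod(preds1, targets, preds2, vismap)
    # cross-check against the eager methods the class still carries; the
    # fused kernels should agree within float tolerance
    ref1 = mod.compute_l1_weighted_loss(targets, preds1, vismap)
    ref2 = mod.compute_l1_weighted_loss(targets, preds2, vismap, ohem=0.5)
    assert torch.allclose(loss1, ref1, atol=1e-5)
    assert torch.allclose(loss2, ref2, atol=1e-5)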
gathierry/FashionAI-KeyPointsDetectionOfApparel
VisErrorLoss
false
15426
[ "Apache-2.0" ]
174
2e0942b42b4a9cd974cdddc151675738dc8a8cb4
https://github.com/gathierry/FashionAI-KeyPointsDetectionOfApparel/tree/2e0942b42b4a9cd974cdddc151675738dc8a8cb4
GroupedMultiHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ql/cqleo2ylfw6cxt5pbke4lx3lscoa6hddoh32alifke3wqf3ffspw.py # Topologically Sorted Source Nodes: [att_w], Original ATen: [aten._softmax] # Source node to ATen node mapping: # att_w => div_1, exp, sum_1 # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tmp2 - tmp2 tmp4 = 0.5 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp6 / tmp6 tl.store(in_out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [Q], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [K], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [V], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_7 del primals_8 buf3 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [att_w], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf4, 16, grid=grid(16), stream=stream0) buf5 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [O], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf4, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf2, (16, 1, 4), (4, 4, 1), 0), out=buf5) buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [O_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, reinterpret_tensor(buf5, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, 
beta=1, out=buf6) del primals_11 return (reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0), buf4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), buf4, reinterpret_tensor(buf5, (16, 4), (4, 1), 0), primals_10, reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 16), 0), reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 16), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 16, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class Linear(nn.Linear):

    def __init__(self, in_features, out_features, bias=True):
        super(Linear, self).__init__(in_features=in_features,
            out_features=out_features, bias=bias)
        self.noise = None
        self.vn_std = None

    def init_vn(self, vn_std):
        self.vn_std = vn_std

    def sample_synaptic_noise(self, distributed):
        self.noise = torch.normal(mean=0.0, std=1.0, size=self.weight.size(),
            device=self.weight.device, dtype=self.weight.dtype)
        if distributed:
            torch.distributed.broadcast(self.noise, 0)

    def forward(self, input):
        weight = self.weight
        if self.noise is not None and self.training:
            weight = weight + self.vn_std * self.noise
        return F.linear(input, weight, self.bias)


class MultiHeadAttention(nn.Module):
    """Multi-Head Attention Layer

    Args:
        dim_model: model feature dimension
        num_heads: number of attention heads

    References:
        Attention Is All You Need, Vaswani et al.
        https://arxiv.org/abs/1706.03762
    """

    def __init__(self, dim_model, num_heads):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.dim_model = dim_model
        self.dim_head = dim_model // num_heads
        self.query_layer = Linear(self.dim_model, self.dim_model)
        self.key_layer = Linear(self.dim_model, self.dim_model)
        self.value_layer = Linear(self.dim_model, self.dim_model)
        self.output_layer = Linear(self.dim_model, self.dim_model)

    def forward(self, Q, K, V, mask=None):
        """Scaled Dot-Product Multi-Head Attention

        Args:
            Q: Query of shape (B, T, D)
            K: Key of shape (B, T, D)
            V: Value of shape (B, T, D)
            mask: Optional position mask of shape (1 or B, 1 or H, 1 or T, 1 or T)

        Return:
            O: Attention output of shape (B, T, D)
            att_w: Attention weights of shape (B, H, T, T)
        """
        batch_size = Q.size(0)
        Q = self.query_layer(Q)
        K = self.key_layer(K)
        V = self.value_layer(V)
        Q = Q.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose(1, 2)
        K = K.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose(1, 2)
        V = V.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose(1, 2)
        att_scores = Q.matmul(K.transpose(2, 3)) / K.shape[-1] ** 0.5
        if mask is not None:
            att_scores += mask * -1000000000.0
        att_w = att_scores.softmax(dim=-1)
        O = att_w.matmul(V)
        O = O.transpose(1, 2).reshape(batch_size, -1, self.dim_model)
        O = self.output_layer(O)
        return O, att_w.detach()

    def pad(self, Q, K, V, mask, chunk_size):
        overflow_Q = Q.size(1) % chunk_size
        overflow_KV = K.size(1) % chunk_size
        padding_Q = chunk_size - overflow_Q if overflow_Q else 0
        padding_KV = chunk_size - overflow_KV if overflow_KV else 0
        batch_size, seq_len_KV, _ = K.size()
        Q = F.pad(Q, (0, 0, 0, padding_Q), value=0)
        K = F.pad(K, (0, 0, 0, padding_KV), value=0)
        V = F.pad(V, (0, 0, 0, padding_KV), value=0)
        if mask is not None:
            if mask.size(2) == 1:
                mask = F.pad(mask, pad=(0, padding_KV), value=1)
            else:
                mask = F.pad(mask, pad=(0, padding_Q, 0, padding_KV), value=1)
        elif padding_KV:
            mask = F.pad(Q.new_zeros(batch_size, 1, 1, seq_len_KV), pad=(0,
                padding_KV), value=1)
        return Q, K, V, mask, padding_Q


class GroupedMultiHeadAttention(MultiHeadAttention):
    """Grouped Multi-Head Attention Layer

    Grouped multi-head attention reduces attention complexity from O(T²·D)
    to O(T²·D/G) by grouping neighbouring time elements along the feature
    dimension before applying scaled dot-product attention.
    Args:
        dim_model: model feature dimension
        num_heads: number of attention heads
        group_size: attention group size
    """

    def __init__(self, dim_model, num_heads, group_size):
        super(GroupedMultiHeadAttention, self).__init__(dim_model, num_heads)
        self.group_size = group_size
        self.dim_head = self.group_size * dim_model // self.num_heads

    def forward(self, Q, K, V, mask=None):
        batch_size = Q.size(0)
        Q = self.query_layer(Q)
        K = self.key_layer(K)
        V = self.value_layer(V)
        Q, K, V, mask, padding = self.pad(Q, K, V, mask, chunk_size=self.group_size)
        Q = Q.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose(1, 2)
        K = K.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose(1, 2)
        V = V.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose(1, 2)
        att_scores = Q.matmul(K.transpose(2, 3)) / K.shape[-1] ** 0.5
        if mask is not None:
            mask = mask[:, :, ::self.group_size, ::self.group_size]
            att_scores += mask * -1000000000.0
        att_w = att_scores.softmax(dim=-1)
        O = att_w.matmul(V)
        O = O.transpose(1, 2).reshape(batch_size, -1, self.dim_model)
        O = O[:, :O.size(1) - padding]
        O = self.output_layer(O)
        return O, att_w.detach()


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'dim_model': 4, 'num_heads': 4, 'group_size': 4}]
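# Shape sketch (editorial addition; the sizes below are chosen here, not
# taken from the repo): grouping folds group_size neighbouring frames into
# the feature axis, so attention runs over T/G positions with head dim
# G*D/H. With B=2, T=8, D=16, H=4, G=2 the head dim is 2*16//4 = 8 and the
# grouped sequence length is 8/2 = 4.
mha = GroupedMultiHeadAttention(dim_model=16, num_heads=4, group_size=2)
x = torch.rand(2, 8, 16)  # (B, T, D) with T divisible by G, so no padding
out, att_w = mha(x, x, x)
print(out.shape)    # torch.Size([2, 8, 16])  -- original length restored
print(att_w.shape)  # torch.Size([2, 4, 4, 4]) -- (B, H, T/G, T/G)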
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tmp2 - tmp2 tmp4 = 0.5 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp6 / tmp6 tl.store(in_out_ptr0 + x0, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_7 del primals_8 buf3 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0), out=buf3) buf4 = reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf3 get_raw_stream(0) triton_poi_fused__softmax_0[grid(16)](buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((16, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf2, (16, 1, 4), (4, 4, 1), 0), out=buf5) buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_11, reinterpret_tensor(buf5, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_11 return reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0 ), buf4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0 ), buf4, reinterpret_tensor(buf5, (16, 4), (4, 1), 0 ), primals_10, 
reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 16), 0
        ), reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 16), 0
        ), reinterpret_tensor(buf1, (16, 1, 4), (4, 16, 1), 0)


class Linear(nn.Linear):

    def __init__(self, in_features, out_features, bias=True):
        super(Linear, self).__init__(in_features=in_features,
            out_features=out_features, bias=bias)
        self.noise = None
        self.vn_std = None

    def init_vn(self, vn_std):
        self.vn_std = vn_std

    def sample_synaptic_noise(self, distributed):
        self.noise = torch.normal(mean=0.0, std=1.0, size=self.weight.size(),
            device=self.weight.device, dtype=self.weight.dtype)
        if distributed:
            torch.distributed.broadcast(self.noise, 0)

    def forward(self, input):
        weight = self.weight
        if self.noise is not None and self.training:
            weight = weight + self.vn_std * self.noise
        return F.linear(input, weight, self.bias)


class MultiHeadAttention(nn.Module):
    """Multi-Head Attention Layer

    Args:
        dim_model: model feature dimension
        num_heads: number of attention heads

    References:
        Attention Is All You Need, Vaswani et al.
        https://arxiv.org/abs/1706.03762
    """

    def __init__(self, dim_model, num_heads):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.dim_model = dim_model
        self.dim_head = dim_model // num_heads
        self.query_layer = Linear(self.dim_model, self.dim_model)
        self.key_layer = Linear(self.dim_model, self.dim_model)
        self.value_layer = Linear(self.dim_model, self.dim_model)
        self.output_layer = Linear(self.dim_model, self.dim_model)

    def forward(self, Q, K, V, mask=None):
        """Scaled Dot-Product Multi-Head Attention

        Args:
            Q: Query of shape (B, T, D)
            K: Key of shape (B, T, D)
            V: Value of shape (B, T, D)
            mask: Optional position mask of shape (1 or B, 1 or H, 1 or T, 1 or T)

        Return:
            O: Attention output of shape (B, T, D)
            att_w: Attention weights of shape (B, H, T, T)
        """
        batch_size = Q.size(0)
        Q = self.query_layer(Q)
        K = self.key_layer(K)
        V = self.value_layer(V)
        Q = Q.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose(1, 2)
        K = K.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose(1, 2)
        V = V.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose(1, 2)
        att_scores = Q.matmul(K.transpose(2, 3)) / K.shape[-1] ** 0.5
        if mask is not None:
            att_scores += mask * -1000000000.0
        att_w = att_scores.softmax(dim=-1)
        O = att_w.matmul(V)
        O = O.transpose(1, 2).reshape(batch_size, -1, self.dim_model)
        O = self.output_layer(O)
        return O, att_w.detach()

    def pad(self, Q, K, V, mask, chunk_size):
        overflow_Q = Q.size(1) % chunk_size
        overflow_KV = K.size(1) % chunk_size
        padding_Q = chunk_size - overflow_Q if overflow_Q else 0
        padding_KV = chunk_size - overflow_KV if overflow_KV else 0
        batch_size, seq_len_KV, _ = K.size()
        Q = F.pad(Q, (0, 0, 0, padding_Q), value=0)
        K = F.pad(K, (0, 0, 0, padding_KV), value=0)
        V = F.pad(V, (0, 0, 0, padding_KV), value=0)
        if mask is not None:
            if mask.size(2) == 1:
                mask = F.pad(mask, pad=(0, padding_KV), value=1)
            else:
                mask = F.pad(mask, pad=(0, padding_Q, 0, padding_KV), value=1)
        elif padding_KV:
            mask = F.pad(Q.new_zeros(batch_size, 1, 1, seq_len_KV), pad=(0,
                padding_KV), value=1)
        return Q, K, V, mask, padding_Q


class GroupedMultiHeadAttentionNew(MultiHeadAttention):
    """Grouped Multi-Head Attention Layer

    Grouped multi-head attention reduces attention complexity from O(T²·D)
    to O(T²·D/G) by grouping neighbouring time elements along the feature
    dimension before applying scaled dot-product attention.
    Args:
        dim_model: model feature dimension
        num_heads: number of attention heads
        group_size: attention group size
    """

    def __init__(self, dim_model, num_heads, group_size):
        super(GroupedMultiHeadAttentionNew, self).__init__(dim_model,
            num_heads)
        self.group_size = group_size
        self.dim_head = self.group_size * dim_model // self.num_heads

    def forward(self, input_0, input_1, input_2):
        primals_2 = self.query_layer.weight
        primals_3 = self.query_layer.bias
        primals_4 = self.key_layer.weight
        primals_5 = self.key_layer.bias
        primals_7 = self.value_layer.weight
        primals_8 = self.value_layer.bias
        primals_10 = self.output_layer.weight
        primals_11 = self.output_layer.bias
        primals_1 = input_0
        primals_6 = input_1
        primals_9 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0], output[1]
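# Editorial note on why triton_poi_fused__softmax_0 above degenerates to
# tmp6 / tmp6: with the traced get_init_inputs() configuration (dim_model=4,
# num_heads=4, group_size=4) and T=4, the grouped sequence length is
# T/G = 1, so each softmax row holds a single logit and every attention
# weight is exactly 1. Inferred from the shapes asserted in call(); the
# compiled path is only valid for that trace. A hedged GPU check:
if torch.cuda.is_available():
    m = GroupedMultiHeadAttentionNew(dim_model=4, num_heads=4, group_size=4).cuda()
    x = torch.rand(4, 4, 4, device='cuda')
    o, w = m(x, x, x)
    # attention weights collapse to ones at the traced (B, H, T/G, T/G) shape
    assert w.shape == (4, 4, 1, 1)
    assert torch.allclose(w, torch.ones_like(w))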
gheyret/EfficientConformer
GroupedMultiHeadAttention
false
15427
[ "Apache-2.0" ]
101
b28a0aaa3b182f72abaccbeb12df0402adf96097
https://github.com/gheyret/EfficientConformer/tree/b28a0aaa3b182f72abaccbeb12df0402adf96097
RelativeThreshold_RegLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/uj/cujzhhrnh77c6uptmjf7irvjxwjfxnc223eitibmbtplp6ndvm6q.py # Topologically Sorted Source Nodes: [sub, abs_1, add, baseV_1, relativeDist, mask], Original ATen: [aten.sub, aten.abs, aten.add, aten.div, aten.ge] # Source node to ATen node mapping: # abs_1 => abs_1 # add => add # baseV_1 => abs_2 # mask => ge # relativeDist => div # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, 1e-07), kwargs = {}) # %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%add,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view, %abs_2), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%div, 4), kwargs = {}) triton_poi_fused_abs_add_div_ge_sub_0 = async_compile.triton('triton_poi_fused_abs_add_div_ge_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_add_div_ge_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_abs_add_div_ge_sub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1e-07 tmp5 = tmp1 + tmp4 tmp6 = tl_math.abs(tmp5) tmp7 = tmp3 / tmp6 tmp8 = 4.0 tmp9 = tmp7 >= tmp8 tl.store(out_ptr0 + (x0), tmp3, xmask) tl.store(out_ptr1 + (x0), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((256, ), (1, ), torch.bool) # Topologically Sorted Source Nodes: [sub, abs_1, add, baseV_1, relativeDist, mask], Original ATen: [aten.sub, aten.abs, aten.add, aten.div, aten.ge] stream0 = get_raw_stream(0) triton_poi_fused_abs_add_div_ge_sub_0.run(arg1_1, arg0_1, buf0, buf1, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (reinterpret_tensor(buf0, (256, ), (1, ), 0), buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.init


class RelativeThreshold_RegLoss(nn.Module):

    def __init__(self, threshold, size_average=True):
        super(RelativeThreshold_RegLoss, self).__init__()
        self.size_average = size_average
        self.eps = 1e-07
        self.threshold = threshold

    def forward(self, preds, targets):
        """
        Args:
            preds: (n, h, w, d)
            targets: (n, h, w, d)
        """
        assert not targets.requires_grad
        assert preds.shape == targets.shape, 'dim of preds and targets are different'
        dist = torch.abs(preds - targets).view(-1)
        baseV = targets.view(-1)
        baseV = torch.abs(baseV + self.eps)
        relativeDist = torch.div(dist, baseV)
        mask = relativeDist.ge(self.threshold)
        largerLossVec = torch.masked_select(dist, mask)
        loss = torch.mean(largerLossVec)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'threshold': 4}]
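# Behavior sketch (editorial addition, not from the repo): the loss averages
# |preds - targets| only where the element-wise relative error reaches the
# threshold. Caveat worth knowing: if no element qualifies, masked_select
# returns an empty vector and torch.mean yields NaN.
crit = RelativeThreshold_RegLoss(threshold=4)
targets = torch.ones(2, 2, 2, 2)
preds = targets + 8.0                  # relative error ~8 >= 4 everywhere
print(crit(preds, targets))            # tensor(8.) -- mean of |diff| over all elements
print(crit(targets * 1.001, targets))  # empty selection -> tensor(nan)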
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_abs_add_div_ge_sub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1e-07 tmp5 = tmp1 + tmp4 tmp6 = tl_math.abs(tmp5) tmp7 = tmp3 / tmp6 tmp8 = 4.0 tmp9 = tmp7 >= tmp8 tl.store(out_ptr0 + x0, tmp3, xmask) tl.store(out_ptr1 + x0, tmp9, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((256,), (1,), torch.bool) get_raw_stream(0) triton_poi_fused_abs_add_div_ge_sub_0[grid(256)](arg1_1, arg0_1, buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return reinterpret_tensor(buf0, (256,), (1,), 0), buf1 class RelativeThreshold_RegLossNew(nn.Module): def __init__(self, threshold, size_average=True): super(RelativeThreshold_RegLossNew, self).__init__() self.size_average = size_average self.eps = 1e-07 self.threshold = threshold def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
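# Editorial caveat: the Inductor graph above stops before the data-dependent
# masked_select/mean, so call() hands back the flattened |preds - targets|
# (output[0]) and the boolean threshold mask (output[1]); forward() as
# generated therefore returns the distance vector rather than the scalar
# loss of the eager reference. A hedged completion that reattaches the
# eager tail inside RelativeThreshold_RegLossNew.forward would look like:
#
#     dist, mask = call([arg0_1, arg1_1])
#     return torch.mean(torch.masked_select(dist, mask))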
ginobilinie/medSynthesisV1
RelativeThreshold_RegLoss
false
15428
[ "MIT" ]
166
1fd202c5928466ef9b11cfebc4490341899312e7
https://github.com/ginobilinie/medSynthesisV1/tree/1fd202c5928466ef9b11cfebc4490341899312e7
Conv1d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/tp/ctphy5pjrbocsqivqoycown67e7tivt2hqux74cyakixoykbezwo.py # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # input_1 => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_2, [1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) x2 = xindex tmp0 = (-1) + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = 
tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + ((-1) + x0 + (4*x1)), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/uj/cujexvhfquvwa2b5g6ftj7xjnepkxifmydmocky5zo4utu4xskqq.py # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_1, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 3) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6), (6, 1), torch.float32) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_2, buf0, 24, grid=grid(24), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 6), (0, 6, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 3), (12, 3, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [conv1d], Original ATen: 
[aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 12, grid=grid(12), stream=stream0) del primals_3 return (reinterpret_tensor(buf2, (4, 3), (3, 1), 0), primals_1, reinterpret_tensor(buf0, (1, 4, 6), (24, 6, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class Conv1d(nn.Conv1d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding='same', dilation=1, groups=1, bias=True): super(Conv1d, self).__init__(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, padding=0, dilation=dilation, groups=groups, bias=bias, padding_mode='zeros') assert padding in ['valid', 'same', 'causal'] if padding == 'valid': self.pre_padding = None elif padding == 'same': self.pre_padding = nn.ConstantPad1d(padding=((kernel_size - 1) // 2, (kernel_size - 1) // 2), value=0) elif padding == 'causal': self.pre_padding = nn.ConstantPad1d(padding=(kernel_size - 1, 0 ), value=0) self.noise = None self.vn_std = None def init_vn(self, vn_std): self.vn_std = vn_std def sample_synaptic_noise(self, distributed): self.noise = torch.normal(mean=0.0, std=1.0, size=self.weight.size( ), device=self.weight.device, dtype=self.weight.dtype) if distributed: torch.distributed.broadcast(self.noise, 0) def forward(self, input): weight = self.weight if self.noise is not None and self.training: weight = weight + self.vn_std * self.noise if self.pre_padding is not None: input = self.pre_padding(input) return F.conv1d(input, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
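# Padding arithmetic sketch (editorial addition; sizes chosen here): with
# kernel_size=4, 'same' pads ((k-1)//2, (k-1)//2) = (1, 1), so the total pad
# is 2 < k-1 = 3 and the output is one step shorter than the input --
# 'same' is only length-preserving for odd kernels. 'causal' pads (k-1, 0)
# on the left and keeps the length.
conv_same = Conv1d(4, 4, kernel_size=4, padding='same')
conv_causal = Conv1d(4, 4, kernel_size=4, padding='causal')
x = torch.rand(1, 4, 10)      # (N, C, L)
print(conv_same(x).shape)     # torch.Size([1, 4, 9])  -- L - 1 for even k
print(conv_causal(x).shape)   # torch.Size([1, 4, 10]) -- length preserved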
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 x2 = xindex tmp0 = -1 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 3 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6), (6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(24)](primals_2, buf0, 24, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 6 ), (0, 6, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 3), (12, 3, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(12)](buf2, primals_3, 12, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 return reinterpret_tensor(buf2, (4, 3), (3, 1), 0 ), primals_1, reinterpret_tensor(buf0, (1, 4, 6), (24, 6, 1), 0) class Conv1dNew(nn.Conv1d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding='same', dilation=1, groups=1, bias=True): super(Conv1dNew, self).__init__(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride= stride, padding=0, dilation=dilation, groups=groups, bias=bias, padding_mode='zeros') assert padding in ['valid', 'same', 'causal'] if padding == 'valid': self.pre_padding = None elif padding == 'same': self.pre_padding = nn.ConstantPad1d(padding=((kernel_size - 1) // 2, (kernel_size - 1) // 2), value=0) elif padding == 'causal': self.pre_padding = nn.ConstantPad1d(padding=(kernel_size - 1, 0 ), value=0) self.noise = None self.vn_std = None def init_vn(self, vn_std): self.vn_std = vn_std def sample_synaptic_noise(self, distributed): self.noise = torch.normal(mean=0.0, std=1.0, size=self.weight.size( ), device=self.weight.device, dtype=self.weight.dtype) if distributed: torch.distributed.broadcast(self.noise, 0) def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
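# Editorial note: call() above is specialized to the traced example -- it
# asserts a (4, 4) unbatched input, a (4, 4, 4) weight, and the 'same'
# padding baked in as a fixed (1, 1) constant pad -- so Conv1dNew only
# serves that exact configuration. A hedged shape check on the compiled
# path (GPU only, since all buffers are pinned to cuda:0):
#
# if torch.cuda.is_available():
#     m = Conv1dNew(4, 4, 4).cuda()
#     y = m(torch.rand(4, 4, device='cuda'))
#     print(y.shape)  # torch.Size([4, 3]) -- the even-kernel 'same' case, L - 1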
gheyret/EfficientConformer
Conv1d
false
15429
[ "Apache-2.0" ]
101
b28a0aaa3b182f72abaccbeb12df0402adf96097
https://github.com/gheyret/EfficientConformer/tree/b28a0aaa3b182f72abaccbeb12df0402adf96097
GCN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/7v/c7vse4ttxzdbru7dfocfs3ww7nwsyfx6sr45vxlp52c3tp32neoh.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # x => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [0, 0, 1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 6 x2 = (xindex // 24) x3 = xindex % 24 x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 
4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + ((-4) + x3 + (16*x2)), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/zd/czdnvxomk4eszij32s3lplwqtm76wnpfvm4wiwhrw2rhdjp2ogia.py # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.constant_pad_nd] # Source node to ATen node mapping: # x_1 => convolution # x_2 => constant_pad_nd_1 # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %constant_pad_nd_1 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%convolution, [1, 1, 0, 0], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_convolution_1 = async_compile.triton('triton_poi_fused_constant_pad_nd_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_convolution_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x4 = (xindex // 6) x2 = (xindex // 24) % 4 x5 = xindex tmp0 = (-1) + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + ((-1) + x0 + (4*x4)), tmp5 & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + (x2), tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp5, tmp8, tmp9) tl.store(out_ptr0 + (x5), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/44/c44cszqvpf2gpsv3a7r2ty4gybhcwcagja4qfcv6v2z7birhmw3a.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # x_4 => constant_pad_nd_2 # Graph fragment: # %constant_pad_nd_2 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 
1, 0, 0], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_2 = async_compile.triton('triton_poi_fused_constant_pad_nd_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) x2 = xindex tmp0 = (-1) + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + ((-1) + x0 + (4*x1)), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ad/cadvxkdcfg3ctfq2bzps3n3fnxzyo3lapisq5w45vitb3ptttep2.py # Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, aten.constant_pad_nd] # Source node to ATen node mapping: # x_5 => convolution_2 # x_6 => constant_pad_nd_3 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %constant_pad_nd_3 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%convolution_2, [0, 0, 1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_convolution_3 = async_compile.triton('triton_poi_fused_constant_pad_nd_convolution_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_convolution_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_convolution_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 6 x4 = (xindex // 24) x5 = xindex % 24 x2 = (xindex // 24) % 4 x6 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + ((-4) + x5 + (16*x4)), tmp5 & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + (x2), tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp5, tmp8, tmp9) tl.store(out_ptr0 + (x6), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3v/c3vkksfv6yaxxvqjtbkbreldbynuman3tm7flauw4zt5m6vl3rdw.py # Topologically Sorted Source Nodes: [x_3, x_7, out], Original ATen: [aten.convolution, aten.add] # Source node to ATen node mapping: # out => add # x_3 => convolution_1 # x_7 => convolution_3 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_1, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_3, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %convolution_3), kwargs = {}) triton_poi_fused_add_convolution_4 = async_compile.triton('triton_poi_fused_add_convolution_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x3), xmask) tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 1), (12, 3, 1, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 3, 1), (12, 3, 1, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 384, grid=grid(384), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_convolution_1.run(buf1, primals_3, buf2, 384, grid=grid(384), stream=stream0) del buf1 del primals_3 # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_2.run(primals_1, buf4, 384, grid=grid(384), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1)) buf6 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, 
aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_convolution_3.run(buf5, primals_7, buf6, 384, grid=grid(384), stream=stream0) del buf5 del primals_7 # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(buf6, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1)) buf8 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [x_3, x_7, out], Original ATen: [aten.convolution, aten.add] triton_poi_fused_add_convolution_4.run(buf8, primals_5, buf7, primals_9, 256, grid=grid(256), stream=stream0) del buf7 del primals_5 del primals_9 return (buf8, primals_2, primals_4, primals_6, primals_8, buf0, buf2, buf4, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 1), (12, 3, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1, 3), (12, 3, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 1, 3), (12, 3, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 3, 1), (12, 3, 1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F from torch import nn import torch.nn.parallel class Conv2D(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, padding= 'same', stride=1, dilation=1, groups=1): super(Conv2D, self).__init__() assert type(kernel_size) in [int, tuple ], 'Allowed kernel type [int or tuple], not {}'.format(type( kernel_size)) assert padding == 'same', 'Allowed padding type {}, not {}'.format( 'same', padding) self.kernel_size = kernel_size if isinstance(kernel_size, tuple): self.h_kernel = kernel_size[0] self.w_kernel = kernel_size[1] else: self.h_kernel = kernel_size self.w_kernel = kernel_size self.padding = padding self.stride = stride self.dilation = dilation self.groups = groups self.conv = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=self.stride, dilation=self.dilation, groups=self.groups) def forward(self, x): if self.padding == 'same': height, width = x.shape[2:] h_pad_need = max(0, (height - 1) * self.stride + self.h_kernel - height) w_pad_need = max(0, (width - 1) * self.stride + self.w_kernel - width) pad_left = w_pad_need // 2 pad_right = w_pad_need - pad_left pad_top = h_pad_need // 2 pad_bottom = h_pad_need - pad_top padding = pad_left, pad_right, pad_top, pad_bottom x = F.pad(x, padding, 'constant', 0) x = self.conv(x) return x class GCN(nn.Module): """ Large Kernel Matters -- https://arxiv.org/abs/1703.02719 """ def __init__(self, in_channels, out_channels, k=3): super(GCN, self).__init__() self.conv_l1 = Conv2D(in_channels=in_channels, out_channels= out_channels, kernel_size=(k, 1), padding='same') self.conv_l2 = Conv2D(in_channels=out_channels, out_channels= out_channels, kernel_size=(1, k), padding='same') self.conv_r1 = Conv2D(in_channels=in_channels, out_channels= out_channels, kernel_size=(1, k), padding='same') self.conv_r2 = Conv2D(in_channels=out_channels, out_channels= out_channels, kernel_size=(k, 1), padding='same') def forward(self, x): x1 = self.conv_l1(x) x1 = self.conv_l2(x1) x2 = self.conv_r1(x) x2 = self.conv_r2(x2) out = x1 + x2 return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
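A minimal worked example (a sketch assuming the 4x4 input from get_inputs, not part of the source repo) of the 'same' padding that Conv2D.forward computes for the (3, 1) kernel; it reproduces the [0, 0, 1, 1] pad performed by the constant_pad_nd kernels above.

import torch
import torch.nn.functional as F

# h_pad_need = max(0, (4 - 1) * 1 + 3 - 4) = 2  -> pad_top = 1, pad_bottom = 1
# w_pad_need = max(0, (4 - 1) * 1 + 1 - 4) = 0  -> no width padding
x = torch.rand(4, 4, 4, 4)
x_padded = F.pad(x, (0, 0, 1, 1), 'constant', 0)  # pad order: (left, right, top, bottom)
assert x_padded.shape == (4, 4, 6, 4)  # matches buf0 in call() above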
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.functional as F from torch import nn import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 6 x2 = xindex // 24 x3 = xindex % 24 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-4 + x3 + 16 * x2), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp6, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_convolution_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x4 = xindex // 6 x2 = xindex // 24 % 4 x5 = xindex tmp0 = -1 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-1 + x0 + 4 * x4), tmp5 & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x2, tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp5, tmp8, tmp9) tl.store(out_ptr0 + x5, tmp10, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 x2 = xindex tmp0 = -1 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_convolution_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 6 x4 = xindex // 24 x5 = xindex % 24 x2 = xindex // 24 % 4 x6 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-4 + x5 + 16 * x4), tmp5 & xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x2, tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp5, tmp8, tmp9) tl.store(out_ptr0 + x6, tmp10, xmask) @triton.jit def triton_poi_fused_add_convolution_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x3, tmp6, 
xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 1), (12, 3, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 1, 3), (12, 3, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 3, 1), (12, 3, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(384)](primals_1, buf0, 384, XBLOCK=256, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.float32) triton_poi_fused_constant_pad_nd_convolution_1[grid(384)](buf1, primals_3, buf2, 384, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 4, 4, 6), (96, 24, 6, 1), torch.float32) triton_poi_fused_constant_pad_nd_2[grid(384)](primals_1, buf4, 384, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1)) buf6 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32) triton_poi_fused_constant_pad_nd_convolution_3[grid(384)](buf5, primals_7, buf6, 384, XBLOCK=128, num_warps=4, num_stages=1) del buf5 del primals_7 buf7 = extern_kernels.convolution(buf6, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1)) buf8 = buf3 del buf3 triton_poi_fused_add_convolution_4[grid(256)](buf8, primals_5, buf7, primals_9, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf7 del primals_5 del primals_9 return (buf8, primals_2, primals_4, primals_6, primals_8, buf0, buf2, buf4, buf6) class Conv2D(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, padding= 'same', stride=1, dilation=1, groups=1): super(Conv2D, self).__init__() assert type(kernel_size) in [int, tuple ], 'Allowed kernel type [int or tuple], not {}'.format(type( kernel_size)) assert padding == 'same', 'Allowed padding type {}, not {}'.format( 'same', padding) self.kernel_size = kernel_size if isinstance(kernel_size, tuple): self.h_kernel = kernel_size[0] self.w_kernel = kernel_size[1] else: self.h_kernel = kernel_size self.w_kernel = kernel_size self.padding = padding self.stride = stride self.dilation = dilation self.groups = groups self.conv = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=self.stride, dilation=self.dilation, groups=self.groups) def forward(self, x): if self.padding == 'same': height, width = x.shape[2:] h_pad_need = max(0, (height - 1) * self.stride + 
self.h_kernel - height) w_pad_need = max(0, (width - 1) * self.stride + self.w_kernel - width) pad_left = w_pad_need // 2 pad_right = w_pad_need - pad_left pad_top = h_pad_need // 2 pad_bottom = h_pad_need - pad_top padding = pad_left, pad_right, pad_top, pad_bottom x = F.pad(x, padding, 'constant', 0) x = self.conv(x) return x class GCNNew(nn.Module): """ Large Kernel Matters -- https://arxiv.org/abs/1703.02719 """ def __init__(self, in_channels, out_channels, k=3): super(GCNNew, self).__init__() self.conv_l1 = Conv2D(in_channels=in_channels, out_channels= out_channels, kernel_size=(k, 1), padding='same') self.conv_l2 = Conv2D(in_channels=out_channels, out_channels= out_channels, kernel_size=(1, k), padding='same') self.conv_r1 = Conv2D(in_channels=in_channels, out_channels= out_channels, kernel_size=(1, k), padding='same') self.conv_r2 = Conv2D(in_channels=out_channels, out_channels= out_channels, kernel_size=(k, 1), padding='same') def forward(self, input_0): primals_2 = self.conv_l1.conv.weight primals_3 = self.conv_l1.conv.bias primals_4 = self.conv_l2.conv.weight primals_5 = self.conv_l2.conv.bias primals_6 = self.conv_r1.conv.weight primals_7 = self.conv_r1.conv.bias primals_8 = self.conv_r2.conv.weight primals_9 = self.conv_r2.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
gist-ailab/uoais
GCN
false
15430
[ "BSD-2-Clause" ]
52
fb42d9a96cd54daad61c956d8d9d65dd0ebef4c7
https://github.com/gist-ailab/uoais/tree/fb42d9a96cd54daad61c956d8d9d65dd0ebef4c7
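A hedged sanity-check sketch for the GCN record above: with shared weights, the compiled GCNNew should reproduce the eager GCN numerically. The tolerances and the CUDA requirement are assumptions, not taken from the repo.

import torch

def check_gcn_equivalence():
    # Assumes GCN and GCNNew from this record are in scope and a CUDA device is available.
    torch.manual_seed(0)
    eager = GCN(in_channels=4, out_channels=4).cuda()
    compiled = GCNNew(in_channels=4, out_channels=4).cuda()
    compiled.load_state_dict(eager.state_dict())  # both expose conv_l1..conv_r2 with identical layouts
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(eager(x), compiled(x), rtol=1e-4, atol=1e-4)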
maxPool23DUinit
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ud/cude6zl4nio2ly5l3l5cwlmxkoqtt4qkekbvrzk6nz7rpwc6ypf3.py # Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # max_pool2d => getitem # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 3) % 3 x0 = xindex % 3 x2 = (xindex // 9) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = 
tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=float("-inf")) tmp12 = x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + ((-4) + x0 + (4*x1) + (16*x2)), tmp16 & xmask, other=float("-inf")) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + ((-3) + x0 + (4*x1) + (16*x2)), tmp23 & xmask, other=float("-inf")) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = 2 + x0 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp5 & tmp29 tmp31 = tl.load(in_ptr0 + ((-2) + x0 + (4*x1) + (16*x2)), tmp30 & xmask, other=float("-inf")) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = x1 tmp34 = tmp33 >= tmp1 tmp35 = tmp33 < tmp3 tmp36 = tmp34 & tmp35 tmp37 = tmp36 & tmp9 tmp38 = tl.load(in_ptr0 + ((-1) + x0 + (4*x1) + (16*x2)), tmp37 & xmask, other=float("-inf")) tmp39 = triton_helpers.maximum(tmp38, tmp32) tmp40 = tmp36 & tmp15 tmp41 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp40 & xmask, other=float("-inf")) tmp42 = triton_helpers.maximum(tmp41, tmp39) tmp43 = tmp36 & tmp22 tmp44 = tl.load(in_ptr0 + (1 + x0 + (4*x1) + (16*x2)), tmp43 & xmask, other=float("-inf")) tmp45 = triton_helpers.maximum(tmp44, tmp42) tmp46 = tmp36 & tmp29 tmp47 = tl.load(in_ptr0 + (2 + x0 + (4*x1) + (16*x2)), tmp46 & xmask, other=float("-inf")) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = 1 + x1 tmp50 = tmp49 >= tmp1 tmp51 = tmp49 < tmp3 tmp52 = tmp50 & tmp51 tmp53 = tmp52 & tmp9 tmp54 = tl.load(in_ptr0 + (3 + x0 + (4*x1) + (16*x2)), tmp53 & xmask, other=float("-inf")) tmp55 = triton_helpers.maximum(tmp54, tmp48) tmp56 = tmp52 & tmp15 tmp57 = tl.load(in_ptr0 + (4 + x0 + (4*x1) + (16*x2)), tmp56 & xmask, other=float("-inf")) tmp58 = triton_helpers.maximum(tmp57, tmp55) tmp59 = tmp52 & tmp22 tmp60 = tl.load(in_ptr0 + (5 + x0 + (4*x1) + (16*x2)), tmp59 & xmask, other=float("-inf")) tmp61 = triton_helpers.maximum(tmp60, tmp58) tmp62 = tmp52 & tmp29 tmp63 = tl.load(in_ptr0 + (6 + x0 + (4*x1) + (16*x2)), tmp62 & xmask, other=float("-inf")) tmp64 = triton_helpers.maximum(tmp63, tmp61) tmp65 = 2 + x1 tmp66 = tmp65 >= tmp1 tmp67 = tmp65 < tmp3 tmp68 = tmp66 & tmp67 tmp69 = tmp68 & tmp9 tmp70 = tl.load(in_ptr0 + (7 + x0 + (4*x1) + (16*x2)), tmp69 & xmask, other=float("-inf")) tmp71 = triton_helpers.maximum(tmp70, tmp64) tmp72 = tmp68 & tmp15 tmp73 = tl.load(in_ptr0 + (8 + x0 + (4*x1) + (16*x2)), tmp72 & xmask, other=float("-inf")) tmp74 = triton_helpers.maximum(tmp73, tmp71) tmp75 = tmp68 & tmp22 tmp76 = tl.load(in_ptr0 + (9 + x0 + (4*x1) + (16*x2)), tmp75 & xmask, other=float("-inf")) tmp77 = triton_helpers.maximum(tmp76, tmp74) tmp78 = tmp68 & tmp29 tmp79 = tl.load(in_ptr0 + (10 + x0 + (4*x1) + (16*x2)), tmp78 & xmask, other=float("-inf")) tmp80 = triton_helpers.maximum(tmp79, tmp77) tl.store(out_ptr0 + (x4), tmp80, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [max_pool2d], Original ATen: 
[aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 144, grid=grid(144), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.init class maxPool23DUinit(nn.Module): def __init__(self, kernel_size, stride, padding=1, dilation=1, nd=2): super(maxPool23DUinit, self).__init__() assert nd == 1 or nd == 2 or nd == 3, 'nd is not correctly specified!!!!, it should be {1,2,3}' if nd == 2: self.pool1 = nn.MaxPool2d(kernel_size=kernel_size, stride= stride, padding=padding, dilation=dilation) elif nd == 3: self.pool1 = nn.MaxPool3d(kernel_size=kernel_size, stride= stride, padding=padding, dilation=dilation) else: self.pool1 = nn.MaxPool1d(kernel_size=kernel_size, stride= stride, padding=padding, dilation=dilation) def forward(self, x): return self.pool1(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4, 'stride': 1}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 3 % 3 x0 = xindex % 3 x2 = xindex // 9 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=float('-inf')) tmp12 = x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1 + 16 * x2), tmp16 & xmask, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-3 + x0 + 4 * x1 + 16 * x2), tmp23 & xmask, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = 2 + x0 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp5 & tmp29 tmp31 = tl.load(in_ptr0 + (-2 + x0 + 4 * x1 + 16 * x2), tmp30 & xmask, other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = x1 tmp34 = tmp33 >= tmp1 tmp35 = tmp33 < tmp3 tmp36 = tmp34 & tmp35 tmp37 = tmp36 & tmp9 tmp38 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp37 & xmask, other=float('-inf')) tmp39 = triton_helpers.maximum(tmp38, tmp32) tmp40 = tmp36 & tmp15 tmp41 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp40 & xmask, other =float('-inf')) tmp42 = triton_helpers.maximum(tmp41, tmp39) tmp43 = tmp36 & tmp22 tmp44 = tl.load(in_ptr0 + (1 + x0 + 4 * x1 + 16 * x2), tmp43 & xmask, other=float('-inf')) tmp45 = triton_helpers.maximum(tmp44, tmp42) tmp46 = tmp36 & tmp29 tmp47 = tl.load(in_ptr0 + (2 + x0 + 4 * x1 + 16 * x2), tmp46 & xmask, other=float('-inf')) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = 1 + x1 tmp50 = tmp49 >= tmp1 tmp51 = tmp49 < tmp3 tmp52 = tmp50 & tmp51 tmp53 = tmp52 & tmp9 tmp54 = tl.load(in_ptr0 + (3 + x0 + 4 * x1 + 16 * x2), tmp53 & xmask, other=float('-inf')) tmp55 = triton_helpers.maximum(tmp54, tmp48) tmp56 = tmp52 & tmp15 tmp57 = tl.load(in_ptr0 + (4 + x0 + 4 * x1 + 16 * x2), tmp56 & xmask, other=float('-inf')) tmp58 = triton_helpers.maximum(tmp57, tmp55) tmp59 = tmp52 & tmp22 tmp60 = tl.load(in_ptr0 + (5 + x0 + 4 * x1 + 16 * x2), tmp59 & xmask, other=float('-inf')) tmp61 = triton_helpers.maximum(tmp60, tmp58) tmp62 = tmp52 & tmp29 tmp63 = tl.load(in_ptr0 + (6 + x0 + 4 * x1 + 16 * x2), tmp62 & xmask, other=float('-inf')) tmp64 = triton_helpers.maximum(tmp63, tmp61) tmp65 = 2 + x1 tmp66 = tmp65 >= tmp1 tmp67 = tmp65 < tmp3 tmp68 = tmp66 & tmp67 tmp69 = tmp68 & tmp9 tmp70 = tl.load(in_ptr0 + (7 + x0 + 4 * x1 + 16 * x2), tmp69 & xmask, other=float('-inf')) tmp71 = triton_helpers.maximum(tmp70, tmp64) tmp72 = tmp68 & tmp15 tmp73 = tl.load(in_ptr0 + (8 + x0 + 4 * x1 + 16 * x2), tmp72 & xmask, other=float('-inf')) tmp74 = triton_helpers.maximum(tmp73, tmp71) tmp75 = tmp68 & 
tmp22 tmp76 = tl.load(in_ptr0 + (9 + x0 + 4 * x1 + 16 * x2), tmp75 & xmask, other=float('-inf')) tmp77 = triton_helpers.maximum(tmp76, tmp74) tmp78 = tmp68 & tmp29 tmp79 = tl.load(in_ptr0 + (10 + x0 + 4 * x1 + 16 * x2), tmp78 & xmask, other=float('-inf')) tmp80 = triton_helpers.maximum(tmp79, tmp77) tl.store(out_ptr0 + x4, tmp80, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(144)](arg0_1, buf0, 144, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class maxPool23DUinitNew(nn.Module): def __init__(self, kernel_size, stride, padding=1, dilation=1, nd=2): super(maxPool23DUinitNew, self).__init__() assert nd == 1 or nd == 2 or nd == 3, 'nd is not correctly specified!!!!, it should be {1,2,3}' if nd == 2: self.pool1 = nn.MaxPool2d(kernel_size=kernel_size, stride= stride, padding=padding, dilation=dilation) elif nd == 3: self.pool1 = nn.MaxPool3d(kernel_size=kernel_size, stride= stride, padding=padding, dilation=dilation) else: self.pool1 = nn.MaxPool1d(kernel_size=kernel_size, stride= stride, padding=padding, dilation=dilation) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ginobilinie/medSynthesisV1
maxPool23DUinit
false
15431
[ "MIT" ]
166
1fd202c5928466ef9b11cfebc4490341899312e7
https://github.com/ginobilinie/medSynthesisV1/tree/1fd202c5928466ef9b11cfebc4490341899312e7
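A short sketch (added for clarity, not from the source repo) of why the fused kernel in the maxPool23DUinit record writes a (4, 4, 3, 3) buffer: nn.MaxPool2d with kernel_size=4, stride=1, padding=1 on a 4x4 spatial input yields a 3x3 output.

import torch
import torch.nn as nn

# out = floor((H + 2*padding - kernel_size) / stride) + 1
#     = floor((4 + 2*1 - 4) / 1) + 1 = 3
pool = nn.MaxPool2d(kernel_size=4, stride=1, padding=1)
x = torch.rand(4, 4, 4, 4)
assert pool(x).shape == (4, 4, 3, 3)  # matches buf0 in call() above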
residualUnit
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/w2/cw23sxahiao23shoty6aralwbjlgswuoghgwkn6wmugklirv7nz3.py # Topologically Sorted Source Nodes: [conv2d, elu], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d => convolution # elu => expm1, gt, mul, mul_2, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {}) triton_poi_fused_convolution_elu_0 = async_compile.triton('triton_poi_fused_convolution_elu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/l3/cl3kktfjbfxvoqsgvjon5fk5ycnqjp7n2a3dk3gk4gw4n2jfe25m.py # Topologically Sorted Source Nodes: [conv2d_1, add, relu], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # add => add # conv2d_1 => convolution_1 # relu => relu # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_convolution_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK 
: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = 0.0 tmp8 = tmp6 <= tmp7 tl.store(in_out_ptr0 + (x3), tmp6, xmask) tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, elu], Original ATen: [aten.convolution, aten.elu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_elu_0.run(buf1, primals_2, buf2, 256, grid=grid(256), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3; del buf3 # reuse buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_1, add, relu], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward] triton_poi_fused_add_convolution_relu_threshold_backward_1.run(buf4, primals_5, primals_3, buf5, 256, grid=grid(256), stream=stream0) del primals_5 return (buf4, primals_1, primals_3, primals_4, buf1, buf2, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init import torch.nn.init class conv23DUnit(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1, bias=True, dilation=1, nd=2): super(conv23DUnit, self).__init__() assert nd == 1 or nd == 2 or nd == 3, 'nd is not correctly specified!!!!, it should be {1,2,3}' if nd == 2: self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, groups=groups, bias=bias, dilation=dilation) elif nd == 3: self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, groups=groups, bias=bias, dilation=dilation) else: self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, groups=groups, bias=bias, dilation=dilation) init.xavier_uniform(self.conv.weight, gain=np.sqrt(2.0)) init.constant(self.conv.bias, 0) def forward(self, x): return self.conv(x) class residualUnit(nn.Module): def __init__(self, in_size, out_size, kernel_size=3, stride=1, padding= 1, activation=F.relu, nd=2): super(residualUnit, self).__init__() self.conv1 = conv23DUnit(in_size, out_size, kernel_size, stride, padding, nd=nd) self.conv2 = conv23DUnit(out_size, out_size, kernel_size, stride, padding, nd=nd) def forward(self, x): return F.relu(self.conv2(F.elu(self.conv1(x))) + x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_size': 4, 'out_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import numpy as np import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init import torch.nn.init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = 0.0 tmp8 = tmp6 <= tmp7 tl.store(in_out_ptr0 + x3, tmp6, xmask) tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_elu_0[grid(256)](buf1, primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(256)]( buf4, primals_5, primals_3, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return buf4, primals_1, primals_3, primals_4, buf1, buf2, buf5 class conv23DUnit(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1, bias=True, dilation=1, nd=2): super(conv23DUnit, self).__init__() assert nd == 1 or nd == 2 or nd == 3, 'nd is not correctly specified!!!!, it should be {1,2,3}' 
if nd == 2: self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, groups=groups, bias=bias, dilation=dilation) elif nd == 3: self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, groups=groups, bias=bias, dilation=dilation) else: self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, groups=groups, bias=bias, dilation=dilation) init.xavier_uniform(self.conv.weight, gain=np.sqrt(2.0)) init.constant(self.conv.bias, 0) def forward(self, x): return self.conv(x) class residualUnitNew(nn.Module): def __init__(self, in_size, out_size, kernel_size=3, stride=1, padding= 1, activation=F.relu, nd=2): super(residualUnitNew, self).__init__() self.conv1 = conv23DUnit(in_size, out_size, kernel_size, stride, padding, nd=nd) self.conv2 = conv23DUnit(out_size, out_size, kernel_size, stride, padding, nd=nd) def forward(self, input_0): primals_1 = self.conv1.conv.weight primals_2 = self.conv1.conv.bias primals_4 = self.conv2.conv.weight primals_5 = self.conv2.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
ginobilinie/medSynthesisV1
residualUnit
false
15,432
[ "MIT" ]
166
1fd202c5928466ef9b11cfebc4490341899312e7
https://github.com/ginobilinie/medSynthesisV1/tree/1fd202c5928466ef9b11cfebc4490341899312e7
PACRRConvMax2dModule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/xw/cxwrk2fodabatoqbpxcl5eb2ifidbtfcttbbx5hhgzol35c2myr4.py # Topologically Sorted Source Nodes: [simmat], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # simmat => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [0, 3, 0, 3], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 7) % 7 x0 = xindex % 7 x2 = (xindex // 49) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x0 tmp4 = 
tmp3 < tmp1 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ch/cchl3etcboj5jw4xggmrhxdnvzhxcggabe4regsbgagybypkwnfy.py # Topologically Sorted Source Nodes: [conv2d, conv, max_1], Original ATen: [aten.convolution, aten.relu, aten.max] # Source node to ATen node mapping: # conv => relu # conv2d => convolution # max_1 => max_1 # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%relu, 1), kwargs = {}) triton_poi_fused_convolution_max_relu_1 = async_compile.triton('triton_poi_fused_convolution_max_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_max_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_max_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp7 = tl.load(in_ptr1 + (1)) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp26 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp27 = tl.load(in_ptr1 + (2)) tmp28 = tl.broadcast_to(tmp27, [XBLOCK]) tmp45 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp46 = tl.load(in_ptr1 + (3)) tmp47 = tl.broadcast_to(tmp46, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp9 = tmp6 + tmp8 tmp10 = triton_helpers.maximum(tmp4, tmp9) tmp11 = tmp5 > tmp10 tmp12 = tmp5 == tmp10 tmp13 = tmp5 != tmp5 tmp14 = tmp10 != tmp10 tmp15 = tmp13 > tmp14 tmp16 = tmp11 | tmp15 tmp17 = tmp13 & tmp14 tmp18 = tmp12 | tmp17 
tmp19 = tl.full([1], 0, tl.int64) tmp20 = tl.full([1], 1, tl.int64) tmp21 = tmp19 < tmp20 tmp22 = tmp18 & tmp21 tmp23 = tmp16 | tmp22 tmp24 = tl.where(tmp23, tmp5, tmp10) tmp25 = tl.where(tmp23, tmp19, tmp20) tmp29 = tmp26 + tmp28 tmp30 = triton_helpers.maximum(tmp4, tmp29) tmp31 = tmp24 > tmp30 tmp32 = tmp24 == tmp30 tmp33 = tmp24 != tmp24 tmp34 = tmp30 != tmp30 tmp35 = tmp33 > tmp34 tmp36 = tmp31 | tmp35 tmp37 = tmp33 & tmp34 tmp38 = tmp32 | tmp37 tmp39 = tl.full([1], 2, tl.int64) tmp40 = tmp25 < tmp39 tmp41 = tmp38 & tmp40 tmp42 = tmp36 | tmp41 tmp43 = tl.where(tmp42, tmp24, tmp30) tmp44 = tl.where(tmp42, tmp25, tmp39) tmp48 = tmp45 + tmp47 tmp49 = triton_helpers.maximum(tmp4, tmp48) tmp50 = tmp43 > tmp49 tmp51 = tmp43 == tmp49 tmp52 = tmp43 != tmp43 tmp53 = tmp49 != tmp49 tmp54 = tmp52 > tmp53 tmp55 = tmp50 | tmp54 tmp56 = tmp52 & tmp53 tmp57 = tmp51 | tmp56 tmp58 = tl.full([1], 3, tl.int64) tmp59 = tmp44 < tmp58 tmp60 = tmp57 & tmp59 tmp61 = tmp55 | tmp60 tmp62 = tl.where(tmp61, tmp43, tmp49) tmp63 = tl.where(tmp61, tmp44, tmp58) tmp64 = triton_helpers.maximum(tmp5, tmp10) tmp65 = triton_helpers.maximum(tmp64, tmp30) tmp66 = triton_helpers.maximum(tmp65, tmp49) tl.store(out_ptr0 + (x2), tmp63, xmask) tl.store(out_ptr1 + (x2), tmp66, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/35/c35vxy5oullorpkglb324ij376h6o5mbjsqjoykh7yiskiyinasn.py # Topologically Sorted Source Nodes: [conv2d, conv], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv => relu # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': 
False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) # Topologically Sorted Source Nodes: [simmat], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 784, grid=grid(784), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, conv, max_1], Original ATen: [aten.convolution, aten.relu, aten.max] triton_poi_fused_convolution_max_relu_1.run(buf1, primals_3, buf2, buf3, 64, grid=grid(64), stream=stream0) # Topologically Sorted Source Nodes: [conv2d, conv, max_1, topk], Original ATen: [aten.convolution, aten.relu, aten.max, aten.topk] buf4 = torch.ops.aten.topk.default(buf3, 4, 2) del buf3 buf5 = buf4[0] buf6 = buf4[1] del buf4 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d, conv], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_2.run(buf1, primals_3, buf7, 256, grid=grid(256), stream=stream0) del buf1 del primals_3 return (buf5, primals_2, buf0, buf6, reinterpret_tensor(buf2, (4, 1, 4, 4), (16, 16, 4, 1), 0), buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
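For orientation (an editorial sketch, not emitted by Inductor): triton_poi_fused_convolution_max_relu_1 above computes both the values and the argmax indices of relu(conv + bias) over the channel dimension in a single pass. With placeholder tensors standing in for buf1 and primals_3, the eager equivalent is:

import torch

# Placeholders for buf1 (conv output) and primals_3 (bias).
conv_out = torch.randn(4, 4, 4, 4)
bias = torch.randn(4)
vals, idxs = torch.relu(conv_out + bias.view(1, -1, 1, 1)).max(dim=1)
# vals corresponds to buf3 (fp32 max), idxs to buf2 (int64 argmax) in the wrapper.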
import torch class PACRRConvMax2dModule(torch.nn.Module): def __init__(self, shape, n_filters, k, channels): super().__init__() self.shape = shape if shape != 1: self.pad = torch.nn.ConstantPad2d((0, shape - 1, 0, shape - 1), 0) else: self.pad = None self.conv = torch.nn.Conv2d(channels, n_filters, shape) self.activation = torch.nn.ReLU() self.k = k self.shape = shape self.channels = channels def forward(self, simmat): BATCH, _CHANNELS, QLEN, DLEN = simmat.shape if self.pad: simmat = self.pad(simmat) conv = self.activation(self.conv(simmat)) top_filters, _ = conv.max(dim=1) if DLEN < self.k: top_filters = torch.nn.functional.pad(top_filters, (0, self.k - DLEN)) top_toks, _ = top_filters.topk(self.k, dim=2) result = top_toks.reshape(BATCH, QLEN, self.k) return result def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'shape': 4, 'n_filters': 4, 'k': 4, 'channels': 4}]
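A quick smoke test (editorial addition) using the configuration from get_init_inputs() and the sample shapes from get_inputs(); this runs on CPU.

import torch

m = PACRRConvMax2dModule(shape=4, n_filters=4, k=4, channels=4)
simmat = torch.rand(4, 4, 4, 4)   # [BATCH, CHANNELS, QLEN, DLEN]
out = m(simmat)
print(out.shape)                  # torch.Size([4, 4, 4]) == [BATCH, QLEN, k]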
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 7 % 7 x0 = xindex % 7 x2 = xindex // 49 x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 4, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x0 tmp4 = tmp3 < tmp1 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_max_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp7 = tl.load(in_ptr1 + 1) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp26 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp27 = tl.load(in_ptr1 + 2) tmp28 = tl.broadcast_to(tmp27, [XBLOCK]) tmp45 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp46 = tl.load(in_ptr1 + 3) tmp47 = tl.broadcast_to(tmp46, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp9 = tmp6 + tmp8 tmp10 = triton_helpers.maximum(tmp4, tmp9) tmp11 = tmp5 > tmp10 tmp12 = tmp5 == tmp10 tmp13 = tmp5 != tmp5 tmp14 = tmp10 != tmp10 tmp15 = tmp13 > tmp14 tmp16 = tmp11 | tmp15 tmp17 = tmp13 & tmp14 tmp18 = tmp12 | tmp17 tmp19 = tl.full([1], 0, tl.int64) tmp20 = tl.full([1], 1, tl.int64) tmp21 = tmp19 < tmp20 tmp22 = tmp18 & tmp21 tmp23 = tmp16 | tmp22 tmp24 = tl.where(tmp23, tmp5, tmp10) tmp25 = tl.where(tmp23, tmp19, tmp20) tmp29 = tmp26 + tmp28 tmp30 = triton_helpers.maximum(tmp4, tmp29) tmp31 = tmp24 > tmp30 tmp32 = tmp24 == tmp30 tmp33 = tmp24 != tmp24 tmp34 = tmp30 != tmp30 tmp35 = tmp33 > tmp34 tmp36 = tmp31 | tmp35 tmp37 = tmp33 & tmp34 tmp38 = tmp32 | tmp37 tmp39 = tl.full([1], 2, tl.int64) tmp40 = tmp25 < tmp39 tmp41 = tmp38 & tmp40 tmp42 = tmp36 | tmp41 tmp43 = tl.where(tmp42, tmp24, tmp30) tmp44 = tl.where(tmp42, tmp25, tmp39) tmp48 = tmp45 + tmp47 tmp49 = triton_helpers.maximum(tmp4, tmp48) tmp50 = tmp43 > tmp49 tmp51 = tmp43 == tmp49 tmp52 = tmp43 != tmp43 tmp53 = tmp49 != tmp49 tmp54 = tmp52 > tmp53 tmp55 = tmp50 | tmp54 tmp56 = tmp52 & tmp53 tmp57 = tmp51 | tmp56 tmp58 = tl.full([1], 3, tl.int64) tmp59 = tmp44 < tmp58 tmp60 = tmp57 & tmp59 tmp61 = tmp55 | tmp60 tl.where(tmp61, tmp43, tmp49) tmp63 = tl.where(tmp61, tmp44, tmp58) tmp64 = triton_helpers.maximum(tmp5, tmp10) tmp65 = triton_helpers.maximum(tmp64, tmp30) tmp66 = triton_helpers.maximum(tmp65, tmp49) tl.store(out_ptr0 + x2, tmp63, xmask) tl.store(out_ptr1 + x2, tmp66, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(784)](primals_1, buf0, 784, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_convolution_max_relu_1[grid(64)](buf1, primals_3, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = torch.ops.aten.topk.default(buf3, 4, 2) del buf3 buf5 = buf4[0] buf6 = buf4[1] del buf4 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_2[grid(256)](buf1, primals_3, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_3 return buf5, primals_2, buf0, buf6, reinterpret_tensor(buf2, (4, 1, 4, 4), (16, 16, 4, 1), 0), buf7 class PACRRConvMax2dModuleNew(torch.nn.Module): def __init__(self, shape, n_filters, k, channels): super().__init__() self.shape = shape if shape != 1: self.pad = torch.nn.ConstantPad2d((0, shape - 1, 0, shape - 1), 0) else: self.pad = None self.conv = torch.nn.Conv2d(channels, n_filters, shape) self.activation = torch.nn.ReLU() self.k = k self.shape = shape self.channels = channels def forward(self, input_0): primals_1 = self.conv.weight primals_3 = self.conv.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
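A hedged equivalence check (editorial, assumes a CUDA device): after copying the convolution parameters, the compiled wrapper should match the eager module defined earlier.

import torch

if torch.cuda.is_available():
    eager = PACRRConvMax2dModule(shape=4, n_filters=4, k=4, channels=4).cuda()
    compiled = PACRRConvMax2dModuleNew(shape=4, n_filters=4, k=4, channels=4).cuda()
    compiled.conv.load_state_dict(eager.conv.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x), compiled(x), atol=1e-5)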
gitter-badger/FlexNeuART
PACRRConvMax2dModule
false
15,433
[ "Apache-2.0" ]
101
f69e5421bdebe9db0d993b5470dace61872f90df
https://github.com/gitter-badger/FlexNeuART/tree/f69e5421bdebe9db0d993b5470dace61872f90df
VisErrorLossV3
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/dm/cdmkn36c5fo3f5jq74sgbr3wzu3x4q4yghzpny3gcojrijurayhy.py # Topologically Sorted Source Nodes: [amplitude_1, amplitude_2, amplitude], Original ATen: [aten.max] # Source node to ATen node mapping: # amplitude => max_1 # amplitude_1 => max_2 # amplitude_2 => max_3 # Graph fragment: # %max_2 : [num_users=2] = call_function[target=torch.ops.aten.max.default](args = (%arg1_1,), kwargs = {}) # %max_3 : [num_users=2] = call_function[target=torch.ops.aten.max.default](args = (%arg1_1,), kwargs = {}) # %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.default](args = (%arg1_1,), kwargs = {}) triton_per_fused_max_0 = async_compile.triton('triton_per_fused_max_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_max_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): xnumel 
= 1 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0)) tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp3, None) tl.store(out_ptr1 + (tl.full([1], 0, tl.int32)), tmp3, None) tl.store(out_ptr2 + (tl.full([1], 0, tl.int32)), tmp3, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/es/cesgqebjkbtoh73uqodp5bypmio2kkdts36wj2dxtq2tebbucdno.py # Topologically Sorted Source Nodes: [sub_1, diff_1, truediv_4, gt_1, vismap_1, eq_2, pos_ids_1, float_5, mul_4, sum_9, float_6, sum_11, truediv_5, le_1, eq_3, neg_ids_1, float_7, mul_5, sum_13, float_8, sum_15, sub_2, diff_2, truediv_8, gt_2, vismap_2, eq_4, pos_ids_2, float_9, mul_8, sum_17, float_10, sum_19, truediv_9, le_2, eq_5, neg_ids_2, float_11, mul_9, sum_21, float_12, sum_23, hm_preds, hm_preds_1, sub, diff, truediv, gt, vismap, eq, pos_ids, float_1, mul, sum_1, float_2, sum_3, truediv_1, le, eq_1, neg_ids, float_3, mul_1, sum_5, float_4, sum_7], Original ATen: [aten.sub, aten.abs, aten.div, aten.gt, aten.repeat, aten.eq, aten.bitwise_and, aten._to_copy, aten.mul, aten.sum, aten.le, aten.relu, aten.view] # Source node to ATen node mapping: # diff => abs_1 # diff_1 => abs_2 # diff_2 => abs_3 # eq => eq # eq_1 => eq_1 # eq_2 => eq_2 # eq_3 => eq_3 # eq_4 => eq_4 # eq_5 => eq_5 # float_1 => convert_element_type # float_10 => convert_element_type_9 # float_11 => convert_element_type_10 # float_12 => convert_element_type_11 # float_2 => convert_element_type_1 # float_3 => convert_element_type_2 # float_4 => convert_element_type_3 # float_5 => convert_element_type_4 # float_6 => convert_element_type_5 # float_7 => convert_element_type_6 # float_8 => convert_element_type_7 # float_9 => convert_element_type_8 # gt => gt # gt_1 => gt_1 # gt_2 => gt_2 # hm_preds => relu # hm_preds_1 => view_1 # le => le # le_1 => le_1 # le_2 => le_2 # mul => mul # mul_1 => mul_1 # mul_4 => mul_4 # mul_5 => mul_5 # mul_8 => mul_8 # mul_9 => mul_9 # neg_ids => bitwise_and_1 # neg_ids_1 => bitwise_and_3 # neg_ids_2 => bitwise_and_5 # pos_ids => bitwise_and # pos_ids_1 => bitwise_and_2 # pos_ids_2 => bitwise_and_4 # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # sum_1 => sum_1 # sum_11 => sum_11 # sum_13 => sum_13 # sum_15 => sum_15 # sum_17 => sum_17 # sum_19 => sum_19 # sum_21 => sum_21 # sum_23 => sum_23 # sum_3 => sum_3 # sum_5 => sum_5 # sum_7 => sum_7 # sum_9 => sum_9 # truediv => div # truediv_1 => div_1 # truediv_4 => div_4 # truediv_5 => div_5 # truediv_8 => div_8 # truediv_9 => div_9 # vismap => repeat # vismap_1 => repeat_1 # vismap_2 => repeat_2 # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %view_4), kwargs = {}) # %abs_2 : [num_users=2] = call_function[target=torch.ops.aten.abs.default](args = (%sub_1,), kwargs = {}) # %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%max_2, 10), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%view_3, %div_4), kwargs = {}) # %repeat_1 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%arg2_1, [1, 1, 64]), kwargs = {}) # %eq_2 : [num_users=1] = 
call_function[target=torch.ops.aten.eq.Scalar](args = (%repeat_1, 1), kwargs = {}) # %bitwise_and_2 : [num_users=2] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%gt_1, %eq_2), kwargs = {}) # %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_2, torch.float32), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_2, %convert_element_type_4), kwargs = {}) # %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [2]), kwargs = {}) # %convert_element_type_5 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_2, torch.float32), kwargs = {}) # %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%convert_element_type_5, [2]), kwargs = {}) # %div_5 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%max_2, 10), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Tensor](args = (%view_3, %div_5), kwargs = {}) # %eq_3 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%repeat_1, 1), kwargs = {}) # %bitwise_and_3 : [num_users=2] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%le_1, %eq_3), kwargs = {}) # %convert_element_type_6 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_3, torch.float32), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_2, %convert_element_type_6), kwargs = {}) # %sum_13 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_5, [2]), kwargs = {}) # %convert_element_type_7 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_3, torch.float32), kwargs = {}) # %sum_15 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%convert_element_type_7, [2]), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_6, %view_7), kwargs = {}) # %abs_3 : [num_users=2] = call_function[target=torch.ops.aten.abs.default](args = (%sub_2,), kwargs = {}) # %div_8 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%max_3, 10), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%view_6, %div_8), kwargs = {}) # %repeat_2 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%arg2_1, [1, 1, 64]), kwargs = {}) # %eq_4 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%repeat_2, 1), kwargs = {}) # %bitwise_and_4 : [num_users=2] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%gt_2, %eq_4), kwargs = {}) # %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_4, torch.float32), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_3, %convert_element_type_8), kwargs = {}) # %sum_17 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_8, [2]), kwargs = {}) # %convert_element_type_9 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_4, torch.float32), kwargs = {}) # %sum_19 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = 
(%convert_element_type_9, [2]), kwargs = {}) # %div_9 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%max_3, 10), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Tensor](args = (%view_6, %div_9), kwargs = {}) # %eq_5 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%repeat_2, 1), kwargs = {}) # %bitwise_and_5 : [num_users=2] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%le_2, %eq_5), kwargs = {}) # %convert_element_type_10 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_5, torch.float32), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_3, %convert_element_type_10), kwargs = {}) # %sum_21 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_9, [2]), kwargs = {}) # %convert_element_type_11 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_5, torch.float32), kwargs = {}) # %sum_23 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%convert_element_type_11, [2]), kwargs = {}) # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {}) # %view_1 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%relu, [4, 4, -1]), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %view_1), kwargs = {}) # %abs_1 : [num_users=2] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%max_1, 10), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%view, %div), kwargs = {}) # %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%arg2_1, [1, 1, 64]), kwargs = {}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%repeat, 1), kwargs = {}) # %bitwise_and : [num_users=2] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%gt, %eq), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and, torch.float32), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, %convert_element_type), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2]), kwargs = {}) # %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and, torch.float32), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%convert_element_type_1, [2]), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%max_1, 10), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Tensor](args = (%view, %div_1), kwargs = {}) # %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%repeat, 1), kwargs = {}) # %bitwise_and_1 : [num_users=2] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%le, %eq_1), kwargs = {}) # %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_1, torch.float32), kwargs = {}) # %mul_1 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, %convert_element_type_2), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [2]), kwargs = {}) # %convert_element_type_3 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_1, torch.float32), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%convert_element_type_3, [2]), kwargs = {}) triton_per_fused__to_copy_abs_bitwise_and_div_eq_gt_le_mul_relu_repeat_sub_sum_view_1 = async_compile.triton('triton_per_fused__to_copy_abs_bitwise_and_div_eq_gt_le_mul_relu_repeat_sub_sum_view_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: 'i32', 20: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_abs_bitwise_and_div_eq_gt_le_mul_relu_repeat_sub_sum_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 12, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__to_copy_abs_bitwise_and_div_eq_gt_le_mul_relu_repeat_sub_sum_view_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (0)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp11 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp37 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last') tmp41 = tl.load(in_ptr4 + (0)) tmp42 = tl.broadcast_to(tmp41, [XBLOCK, RBLOCK]) tmp68 = tl.load(in_ptr5 + (r1 + (64*x0)), 
xmask, other=0.0) tmp72 = tl.load(in_ptr6 + (0)) tmp73 = tl.broadcast_to(tmp72, [XBLOCK, RBLOCK]) tmp2 = tl.full([1, 1], 0, tl.int32) tmp3 = triton_helpers.maximum(tmp2, tmp1) tmp4 = tmp0 - tmp3 tmp5 = tl_math.abs(tmp4) tmp8 = 0.1 tmp9 = tmp7 * tmp8 tmp10 = tmp0 > tmp9 tmp12 = 1.0 tmp13 = tmp11 == tmp12 tmp14 = tmp10 & tmp13 tmp15 = tmp14.to(tl.float32) tmp16 = tmp5 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tmp21 = tmp0 <= tmp9 tmp22 = tmp21 & tmp13 tmp23 = tmp22.to(tl.float32) tmp24 = tmp5 * tmp23 tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.where(xmask, tmp25, 0) tmp28 = tl.sum(tmp27, 1)[:, None] tmp29 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp31 = tl.where(xmask, tmp29, 0) tmp32 = tl.sum(tmp31, 1)[:, None] tmp33 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp35 = tl.where(xmask, tmp33, 0) tmp36 = tl.sum(tmp35, 1)[:, None] tmp38 = triton_helpers.maximum(tmp2, tmp37) tmp39 = tmp0 - tmp38 tmp40 = tl_math.abs(tmp39) tmp43 = tmp42 * tmp8 tmp44 = tmp0 > tmp43 tmp45 = tmp44 & tmp13 tmp46 = tmp45.to(tl.float32) tmp47 = tmp40 * tmp46 tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK]) tmp50 = tl.where(xmask, tmp48, 0) tmp51 = tl.sum(tmp50, 1)[:, None] tmp52 = tmp0 <= tmp43 tmp53 = tmp52 & tmp13 tmp54 = tmp53.to(tl.float32) tmp55 = tmp40 * tmp54 tmp56 = tl.broadcast_to(tmp55, [XBLOCK, RBLOCK]) tmp58 = tl.where(xmask, tmp56, 0) tmp59 = tl.sum(tmp58, 1)[:, None] tmp60 = tl.broadcast_to(tmp46, [XBLOCK, RBLOCK]) tmp62 = tl.where(xmask, tmp60, 0) tmp63 = tl.sum(tmp62, 1)[:, None] tmp64 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK]) tmp66 = tl.where(xmask, tmp64, 0) tmp67 = tl.sum(tmp66, 1)[:, None] tmp69 = triton_helpers.maximum(tmp2, tmp68) tmp70 = tmp0 - tmp69 tmp71 = tl_math.abs(tmp70) tmp74 = tmp73 * tmp8 tmp75 = tmp0 > tmp74 tmp76 = tmp75 & tmp13 tmp77 = tmp76.to(tl.float32) tmp78 = tmp71 * tmp77 tmp79 = tl.broadcast_to(tmp78, [XBLOCK, RBLOCK]) tmp81 = tl.where(xmask, tmp79, 0) tmp82 = tl.sum(tmp81, 1)[:, None] tmp83 = tmp0 <= tmp74 tmp84 = tmp83 & tmp13 tmp85 = tmp84.to(tl.float32) tmp86 = tmp71 * tmp85 tmp87 = tl.broadcast_to(tmp86, [XBLOCK, RBLOCK]) tmp89 = tl.where(xmask, tmp87, 0) tmp90 = tl.sum(tmp89, 1)[:, None] tmp91 = tl.broadcast_to(tmp77, [XBLOCK, RBLOCK]) tmp93 = tl.where(xmask, tmp91, 0) tmp94 = tl.sum(tmp93, 1)[:, None] tmp95 = tl.broadcast_to(tmp85, [XBLOCK, RBLOCK]) tmp97 = tl.where(xmask, tmp95, 0) tmp98 = tl.sum(tmp97, 1)[:, None] tl.store(out_ptr0 + (x0), tmp20, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) tl.store(out_ptr2 + (x0), tmp32, xmask) tl.store(out_ptr3 + (x0), tmp36, xmask) tl.store(out_ptr4 + (x0), tmp51, xmask) tl.store(out_ptr5 + (x0), tmp59, xmask) tl.store(out_ptr6 + (x0), tmp63, xmask) tl.store(out_ptr7 + (x0), tmp67, xmask) tl.store(out_ptr8 + (x0), tmp82, xmask) tl.store(out_ptr9 + (x0), tmp90, xmask) tl.store(out_ptr10 + (x0), tmp94, xmask) tl.store(out_ptr11 + (x0), tmp98, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/la/claqn2rhafwydbl5wcdrecdfhka6yuepajc3ejfzcnz4eb5guzpw.py # Topologically Sorted Source Nodes: [sum_10, sum_12, add_3, pos_loss_1, mul_6, sum_14, sum_16, add_4, neg_loss_1, mul_7, total_loss_1], Original ATen: [aten.sum, aten.add, aten.div, aten.mul] # Source node to ATen node mapping: # add_3 => add_3 # add_4 => add_4 # mul_6 => mul_6 # mul_7 => mul_7 # neg_loss_1 => div_7 # pos_loss_1 => div_6 # sum_10 => sum_10 # sum_12 => sum_12 # sum_14 => sum_14 # sum_16 => sum_16 # total_loss_1 => add_5 # Graph fragment: # 
%sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_9, [0]), kwargs = {}) # %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_11, [0]), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_12, 0.0001), kwargs = {}) # %div_6 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_10, %add_3), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_6, 0.5), kwargs = {}) # %sum_14 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_13, [0]), kwargs = {}) # %sum_16 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_15, [0]), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_16, 0.0001), kwargs = {}) # %div_7 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_14, %add_4), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_7, 0.5), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %mul_7), kwargs = {}) triton_poi_fused_add_div_mul_sum_2 = async_compile.triton('triton_poi_fused_add_div_mul_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0), xmask) tmp7 = tl.load(in_ptr1 + (x0), xmask) tmp8 = tl.load(in_ptr1 + (4 + x0), xmask) tmp10 = tl.load(in_ptr1 + (8 + x0), xmask) tmp12 = tl.load(in_ptr1 + (12 + x0), xmask) tmp19 = tl.load(in_ptr2 + (x0), xmask) tmp20 = tl.load(in_ptr2 + (4 + x0), xmask) tmp22 = tl.load(in_ptr2 + (8 + x0), xmask) tmp24 = tl.load(in_ptr2 + (12 + x0), xmask) tmp26 = 
tl.load(in_ptr3 + (x0), xmask) tmp27 = tl.load(in_ptr3 + (4 + x0), xmask) tmp29 = tl.load(in_ptr3 + (8 + x0), xmask) tmp31 = tl.load(in_ptr3 + (12 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp9 = tmp7 + tmp8 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp14 = 0.0001 tmp15 = tmp13 + tmp14 tmp16 = tmp6 / tmp15 tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp21 = tmp19 + tmp20 tmp23 = tmp21 + tmp22 tmp25 = tmp23 + tmp24 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp32 + tmp14 tmp34 = tmp25 / tmp33 tmp35 = tmp34 * tmp17 tmp36 = tmp18 + tmp35 tl.store(out_ptr0 + (x0), tmp36, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/zk/czkt5zjkqewwauep7wrsfmtdtevalevm3dc7qo3b6pzlv6bbxwje.py # Topologically Sorted Source Nodes: [loss2], Original ATen: [aten.mean] # Source node to ATen node mapping: # loss2 => mean_1 # Graph fragment: # %mean_1 : [num_users=2] = call_function[target=torch.ops.aten.mean.default](args = (%getitem,), kwargs = {}) triton_per_fused_mean_3 = async_compile.triton('triton_per_fused_mean_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 2], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 2 RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp3, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/f6/cf6wjppdzyjosppuxmpfq7gf7f257o7tqv3z4gm74arl7nubj4fz.py # Topologically Sorted Source Nodes: [sum_2, sum_4, add, pos_loss, mul_2, sum_6, sum_8, add_1, neg_loss, mul_3, total_loss, loss1, loss2, add_9, loss3, add_10], Original ATen: [aten.sum, aten.add, aten.div, aten.mul, aten.mean] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_10 => add_10 # add_9 => add_9 # loss1 
=> mean # loss2 => mean_1 # loss3 => mean_2 # mul_2 => mul_2 # mul_3 => mul_3 # neg_loss => div_3 # pos_loss => div_2 # sum_2 => sum_2 # sum_4 => sum_4 # sum_6 => sum_6 # sum_8 => sum_8 # total_loss => add_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_1, [0]), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_3, [0]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_4, 0.0001), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, %add), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, 0.5), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_5, [0]), kwargs = {}) # %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_7, [0]), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_8, 0.0001), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_6, %add_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_3, 0.5), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.default](args = (%add_2,), kwargs = {}) # %mean_1 : [num_users=2] = call_function[target=torch.ops.aten.mean.default](args = (%getitem,), kwargs = {}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mean_1), kwargs = {}) # %mean_2 : [num_users=2] = call_function[target=torch.ops.aten.mean.default](args = (%getitem_2,), kwargs = {}) # %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_9, %mean_2), kwargs = {}) triton_per_fused_add_div_mean_mul_sum_4 = async_compile.triton('triton_per_fused_add_div_mean_mul_sum_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {8: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=(8,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_sum_4', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1', 'in_out_ptr2'], 'no_x_dim': False, 'num_load': 18, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_mul_sum_4(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr0 + (4 + r0), None) tmp3 = tl.load(in_ptr0 + (8 + r0), None) tmp5 = tl.load(in_ptr0 + (12 + r0), None) tmp7 = tl.load(in_ptr1 + (r0), None) tmp8 = tl.load(in_ptr1 + (4 + r0), None) tmp10 = tl.load(in_ptr1 + (8 + r0), None) tmp12 = tl.load(in_ptr1 + (12 + r0), None) tmp19 = tl.load(in_ptr2 + (r0), None) tmp20 = tl.load(in_ptr2 + (4 + r0), None) tmp22 = tl.load(in_ptr2 + (8 + r0), None) tmp24 = tl.load(in_ptr2 + (12 + r0), None) tmp26 = tl.load(in_ptr3 + (r0), None) tmp27 = tl.load(in_ptr3 + (4 + r0), None) tmp29 = tl.load(in_ptr3 + (8 + r0), None) tmp31 = tl.load(in_ptr3 + (12 + r0), None) tmp42 = tl.load(in_out_ptr1 + (0)) tmp43 = tl.broadcast_to(tmp42, [XBLOCK, 1]) tmp46 = tl.load(in_out_ptr2 + (0)) tmp47 = tl.broadcast_to(tmp46, [XBLOCK, 1]) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp9 = tmp7 + tmp8 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp14 = 0.0001 tmp15 = tmp13 + tmp14 tmp16 = tmp6 / tmp15 tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp21 = tmp19 + tmp20 tmp23 = tmp21 + tmp22 tmp25 = tmp23 + tmp24 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp32 + tmp14 tmp34 = tmp25 / tmp33 tmp35 = tmp34 * tmp17 tmp36 = tmp18 + tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = tl.sum(tmp37, 1)[:, None] tmp40 = 4.0 tmp41 = tmp39 / tmp40 tmp44 = 2.0 tmp45 = tmp43 / tmp44 tmp48 = 1.0 tmp49 = tmp47 / tmp48 tmp50 = tmp41 + tmp45 tmp51 = tmp50 + tmp49 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp41, None) tl.debug_barrier() tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp45, None) tl.debug_barrier() tl.store(in_out_ptr2 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp49, None) tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp51, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 64), (256, 64, 1)) assert_size_stride(arg1_1, (4, 4, 16, 4), (256, 64, 4, 1)) assert_size_stride(arg2_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(arg3_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf9 = empty_strided_cuda((), (), torch.float32) buf18 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [amplitude_1, amplitude_2, amplitude], Original ATen: [aten.max] stream0 = get_raw_stream(0) triton_per_fused_max_0.run(arg1_1, buf0, buf9, buf18, 1, 1024, grid=grid(1), stream=stream0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4), 
(4, 1), torch.float32) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub_1, diff_1, truediv_4, gt_1, vismap_1, eq_2, pos_ids_1, float_5, mul_4, sum_9, float_6, sum_11, truediv_5, le_1, eq_3, neg_ids_1, float_7, mul_5, sum_13, float_8, sum_15, sub_2, diff_2, truediv_8, gt_2, vismap_2, eq_4, pos_ids_2, float_9, mul_8, sum_17, float_10, sum_19, truediv_9, le_2, eq_5, neg_ids_2, float_11, mul_9, sum_21, float_12, sum_23, hm_preds, hm_preds_1, sub, diff, truediv, gt, vismap, eq, pos_ids, float_1, mul, sum_1, float_2, sum_3, truediv_1, le, eq_1, neg_ids, float_3, mul_1, sum_5, float_4, sum_7], Original ATen: [aten.sub, aten.abs, aten.div, aten.gt, aten.repeat, aten.eq, aten.bitwise_and, aten._to_copy, aten.mul, aten.sum, aten.le, aten.relu, aten.view] triton_per_fused__to_copy_abs_bitwise_and_div_eq_gt_le_mul_relu_repeat_sub_sum_view_1.run(arg1_1, arg3_1, buf0, arg2_1, buf9, arg0_1, buf18, buf1, buf3, buf2, buf4, buf10, buf12, buf11, buf13, buf19, buf21, buf20, buf22, 16, 64, grid=grid(16), stream=stream0) del arg0_1 del arg1_1 del arg2_1 del arg3_1 buf5 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [sum_10, sum_12, add_3, pos_loss_1, mul_6, sum_14, sum_16, add_4, neg_loss_1, mul_7, total_loss_1], Original ATen: [aten.sum, aten.add, aten.div, aten.mul] triton_poi_fused_add_div_mul_sum_2.run(buf1, buf2, buf3, buf4, buf5, 4, grid=grid(4), stream=stream0) del buf1 del buf2 del buf3 del buf4 # Topologically Sorted Source Nodes: [topk], Original ATen: [aten.topk] buf6 = torch.ops.aten.topk.default(buf5, 2) buf7 = buf6[0] del buf6 buf14 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [sum_18, sum_20, add_6, pos_loss_2, mul_10, sum_22, sum_24, add_7, neg_loss_2, mul_11, total_loss_3], Original ATen: [aten.sum, aten.add, aten.div, aten.mul] triton_poi_fused_add_div_mul_sum_2.run(buf10, buf11, buf12, buf13, buf14, 4, grid=grid(4), stream=stream0) del buf10 del buf11 del buf12 del buf13 # Topologically Sorted Source Nodes: [topk_1], Original ATen: [aten.topk] buf15 = torch.ops.aten.topk.default(buf14, 1) del buf14 buf16 = buf15[0] del buf15 buf26 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [loss2], Original ATen: [aten.mean] triton_per_fused_mean_3.run(buf7, buf26, 1, 2, grid=grid(1), stream=stream0) del buf7 buf24 = buf18; del buf18 # reuse buf25 = buf24; del buf24 # reuse buf27 = buf26; del buf26 # reuse buf28 = reinterpret_tensor(buf16, (), (), 0); del buf16 # reuse buf29 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sum_2, sum_4, add, pos_loss, mul_2, sum_6, sum_8, add_1, neg_loss, mul_3, total_loss, loss1, loss2, add_9, loss3, add_10], Original ATen: [aten.sum, aten.add, aten.div, aten.mul, aten.mean] triton_per_fused_add_div_mean_mul_sum_4.run(buf25, buf27, buf28, buf19, buf20, buf21, buf22, buf29, 1, 4, grid=grid(1), stream=stream0) del buf19 del buf20 del buf21 del buf22 return (buf29, buf25, buf27, buf28, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 64), (256, 64, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 16, 
4), (256, 64, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
from torch import nn


class VisErrorLossV3(nn.Module):

    def __init__(self):
        super(VisErrorLossV3, self).__init__()

    def compute_l1_weighted_loss(self, hm_targets, hm_preds, vismap, ohem=1.0):
        """
        :param hm_targets: [batch size, keypoint number, h, w]
        :param hm_preds: [batch size, keypoint number, h, w]
        :param vismap: [batch size, keypoint number]
        :return:
        """
        epsilon = 0.0001
        hm_preds = F.relu(hm_preds, False)
        amplitude = torch.max(hm_targets)
        b, k, h, w = hm_targets.size()
        hm_targets = hm_targets.view(b, k, -1)
        hm_preds = hm_preds.view(b, k, -1)
        vismap = vismap.view(b, k, 1).repeat(1, 1, h * w)
        pos_ids = (hm_targets > amplitude / 10) & (vismap == 1)
        neg_ids = (hm_targets <= amplitude / 10) & (vismap == 1)
        diff = (hm_targets - hm_preds).abs()
        pos_loss = (diff * pos_ids.float()).sum(2).sum(0) / (
            pos_ids.float().sum(2).sum(0) + epsilon)
        neg_loss = (diff * neg_ids.float()).sum(2).sum(0) / (
            neg_ids.float().sum(2).sum(0) + epsilon)
        total_loss = 0.5 * pos_loss + 0.5 * neg_loss
        if ohem < 1:
            k = int(total_loss.size(0) * ohem)
            total_loss, _ = total_loss.topk(k)
        return total_loss.mean()

    def compute_l2_loss(self, hm_targets, hm_preds, vismap, ohem=1.0):
        """
        :param hm_targets: [batch size, keypoint number, h, w]
        :param hm_preds: [batch size, keypoint number, h, w]
        :param vismap: [batch size, keypoint number]
        :return:
        """
        epsilon = 0.0001
        hm_preds = F.relu(hm_preds, False)
        b, k, h, w = hm_targets.size()
        hm_targets = hm_targets.view(b, k, -1)
        hm_preds = hm_preds.view(b, k, -1)
        vismap = vismap.view(b, k, 1).repeat(1, 1, h * w)
        ids = vismap == 1
        diff = (hm_targets - hm_preds) ** 2
        total_loss = (diff * ids.float()).sum(2).sum(0) / (
            ids.float().sum(2).sum(0) + epsilon)
        if ohem < 1:
            k = int(total_loss.size(0) * ohem)
            total_loss, _ = total_loss.topk(k)
        return total_loss.mean()

    def forward(self, hm_targets, hm_preds1, hm_preds2, vismap):
        """
        :param hm_targets: [batch size, keypoint number, h, w]
        :param hm_preds: [batch size, keypoint number, h, w]
        :param vismap: [batch size, keypoint number]
        :return:
        """
        loss1 = self.compute_l1_weighted_loss(hm_targets, hm_preds1, vismap)
        loss2 = self.compute_l1_weighted_loss(hm_targets, hm_preds2[0],
            vismap, ohem=0.5)
        loss3 = self.compute_l1_weighted_loss(hm_targets, hm_preds2[1],
            vismap, ohem=0.3)
        return loss1 + loss2 + loss3, loss1, loss2, loss3


def get_inputs():
    return [torch.rand([4, 4, 16, 4]), torch.rand([4, 4, 64]),
        torch.rand([4, 4, 4]), torch.rand([4, 4, 1])]


def get_init_inputs():
    return [[], {}]
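# Illustrative smoke test (an addition, not from the original repo): runs the
# eager criterion once with the exact shapes produced by get_inputs() above;
# variable names are made up for the example.
criterion = VisErrorLossV3()
hm_targets, hm_preds1, hm_preds2, vismap = get_inputs()
total, loss1, loss2, loss3 = criterion(hm_targets, hm_preds1, hm_preds2, vismap)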
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn.functional as F from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_max_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp3, None) tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp3, None) tl.store(out_ptr2 + tl.full([1], 0, tl.int32), tmp3, None) @triton.jit def triton_per_fused__to_copy_abs_bitwise_and_div_eq_gt_le_mul_relu_repeat_sub_sum_view_1( in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp11 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp37 = tl.load(in_ptr1 + (16 + x0), xmask, eviction_policy='evict_last') tmp41 = tl.load(in_ptr4 + 0) tmp42 = tl.broadcast_to(tmp41, [XBLOCK, RBLOCK]) tmp68 = tl.load(in_ptr5 + (r1 + 64 * x0), xmask, other=0.0) tmp72 = tl.load(in_ptr6 + 0) tmp73 = tl.broadcast_to(tmp72, [XBLOCK, RBLOCK]) tmp2 = tl.full([1, 1], 0, tl.int32) tmp3 = triton_helpers.maximum(tmp2, tmp1) tmp4 = tmp0 - tmp3 tmp5 = tl_math.abs(tmp4) tmp8 = 0.1 tmp9 = tmp7 * tmp8 tmp10 = tmp0 > tmp9 tmp12 = 1.0 tmp13 = tmp11 == tmp12 tmp14 = tmp10 & tmp13 tmp15 = tmp14.to(tl.float32) tmp16 = tmp5 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tmp21 = tmp0 <= tmp9 tmp22 = tmp21 & tmp13 tmp23 = tmp22.to(tl.float32) tmp24 = tmp5 * tmp23 tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK]) tmp27 = tl.where(xmask, tmp25, 0) tmp28 = tl.sum(tmp27, 1)[:, None] tmp29 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp31 = tl.where(xmask, tmp29, 0) tmp32 = tl.sum(tmp31, 1)[:, None] tmp33 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp35 = tl.where(xmask, tmp33, 0) tmp36 = tl.sum(tmp35, 1)[:, None] tmp38 = triton_helpers.maximum(tmp2, tmp37) tmp39 = tmp0 - tmp38 tmp40 = tl_math.abs(tmp39) tmp43 = tmp42 * tmp8 tmp44 = tmp0 > tmp43 tmp45 = tmp44 & tmp13 tmp46 = tmp45.to(tl.float32) tmp47 = tmp40 * tmp46 tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK]) tmp50 = tl.where(xmask, tmp48, 0) tmp51 = tl.sum(tmp50, 1)[:, None] tmp52 = tmp0 <= tmp43 tmp53 = tmp52 & tmp13 tmp54 = 
tmp53.to(tl.float32) tmp55 = tmp40 * tmp54 tmp56 = tl.broadcast_to(tmp55, [XBLOCK, RBLOCK]) tmp58 = tl.where(xmask, tmp56, 0) tmp59 = tl.sum(tmp58, 1)[:, None] tmp60 = tl.broadcast_to(tmp46, [XBLOCK, RBLOCK]) tmp62 = tl.where(xmask, tmp60, 0) tmp63 = tl.sum(tmp62, 1)[:, None] tmp64 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK]) tmp66 = tl.where(xmask, tmp64, 0) tmp67 = tl.sum(tmp66, 1)[:, None] tmp69 = triton_helpers.maximum(tmp2, tmp68) tmp70 = tmp0 - tmp69 tmp71 = tl_math.abs(tmp70) tmp74 = tmp73 * tmp8 tmp75 = tmp0 > tmp74 tmp76 = tmp75 & tmp13 tmp77 = tmp76.to(tl.float32) tmp78 = tmp71 * tmp77 tmp79 = tl.broadcast_to(tmp78, [XBLOCK, RBLOCK]) tmp81 = tl.where(xmask, tmp79, 0) tmp82 = tl.sum(tmp81, 1)[:, None] tmp83 = tmp0 <= tmp74 tmp84 = tmp83 & tmp13 tmp85 = tmp84.to(tl.float32) tmp86 = tmp71 * tmp85 tmp87 = tl.broadcast_to(tmp86, [XBLOCK, RBLOCK]) tmp89 = tl.where(xmask, tmp87, 0) tmp90 = tl.sum(tmp89, 1)[:, None] tmp91 = tl.broadcast_to(tmp77, [XBLOCK, RBLOCK]) tmp93 = tl.where(xmask, tmp91, 0) tmp94 = tl.sum(tmp93, 1)[:, None] tmp95 = tl.broadcast_to(tmp85, [XBLOCK, RBLOCK]) tmp97 = tl.where(xmask, tmp95, 0) tmp98 = tl.sum(tmp97, 1)[:, None] tl.store(out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) tl.store(out_ptr2 + x0, tmp32, xmask) tl.store(out_ptr3 + x0, tmp36, xmask) tl.store(out_ptr4 + x0, tmp51, xmask) tl.store(out_ptr5 + x0, tmp59, xmask) tl.store(out_ptr6 + x0, tmp63, xmask) tl.store(out_ptr7 + x0, tmp67, xmask) tl.store(out_ptr8 + x0, tmp82, xmask) tl.store(out_ptr9 + x0, tmp90, xmask) tl.store(out_ptr10 + x0, tmp94, xmask) tl.store(out_ptr11 + x0, tmp98, xmask) @triton.jit def triton_poi_fused_add_div_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0), xmask) tmp7 = tl.load(in_ptr1 + x0, xmask) tmp8 = tl.load(in_ptr1 + (4 + x0), xmask) tmp10 = tl.load(in_ptr1 + (8 + x0), xmask) tmp12 = tl.load(in_ptr1 + (12 + x0), xmask) tmp19 = tl.load(in_ptr2 + x0, xmask) tmp20 = tl.load(in_ptr2 + (4 + x0), xmask) tmp22 = tl.load(in_ptr2 + (8 + x0), xmask) tmp24 = tl.load(in_ptr2 + (12 + x0), xmask) tmp26 = tl.load(in_ptr3 + x0, xmask) tmp27 = tl.load(in_ptr3 + (4 + x0), xmask) tmp29 = tl.load(in_ptr3 + (8 + x0), xmask) tmp31 = tl.load(in_ptr3 + (12 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp9 = tmp7 + tmp8 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp14 = 0.0001 tmp15 = tmp13 + tmp14 tmp16 = tmp6 / tmp15 tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp21 = tmp19 + tmp20 tmp23 = tmp21 + tmp22 tmp25 = tmp23 + tmp24 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp32 + tmp14 tmp34 = tmp25 / tmp33 tmp35 = tmp34 * tmp17 tmp36 = tmp18 + tmp35 tl.store(out_ptr0 + x0, tmp36, xmask) @triton.jit def triton_per_fused_mean_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. 
constexpr): RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None) @triton.jit def triton_per_fused_add_div_mean_mul_sum_4(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (4 + r0), None) tmp3 = tl.load(in_ptr0 + (8 + r0), None) tmp5 = tl.load(in_ptr0 + (12 + r0), None) tmp7 = tl.load(in_ptr1 + r0, None) tmp8 = tl.load(in_ptr1 + (4 + r0), None) tmp10 = tl.load(in_ptr1 + (8 + r0), None) tmp12 = tl.load(in_ptr1 + (12 + r0), None) tmp19 = tl.load(in_ptr2 + r0, None) tmp20 = tl.load(in_ptr2 + (4 + r0), None) tmp22 = tl.load(in_ptr2 + (8 + r0), None) tmp24 = tl.load(in_ptr2 + (12 + r0), None) tmp26 = tl.load(in_ptr3 + r0, None) tmp27 = tl.load(in_ptr3 + (4 + r0), None) tmp29 = tl.load(in_ptr3 + (8 + r0), None) tmp31 = tl.load(in_ptr3 + (12 + r0), None) tmp42 = tl.load(in_out_ptr1 + 0) tmp43 = tl.broadcast_to(tmp42, [XBLOCK, 1]) tmp46 = tl.load(in_out_ptr2 + 0) tmp47 = tl.broadcast_to(tmp46, [XBLOCK, 1]) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp9 = tmp7 + tmp8 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp14 = 0.0001 tmp15 = tmp13 + tmp14 tmp16 = tmp6 / tmp15 tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp21 = tmp19 + tmp20 tmp23 = tmp21 + tmp22 tmp25 = tmp23 + tmp24 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp32 + tmp14 tmp34 = tmp25 / tmp33 tmp35 = tmp34 * tmp17 tmp36 = tmp18 + tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = tl.sum(tmp37, 1)[:, None] tmp40 = 4.0 tmp41 = tmp39 / tmp40 tmp44 = 2.0 tmp45 = tmp43 / tmp44 tmp48 = 1.0 tmp49 = tmp47 / tmp48 tmp50 = tmp41 + tmp45 tmp51 = tmp50 + tmp49 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp41, None) tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp45, None) tl.debug_barrier() tl.store(in_out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp49, None) tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp51, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 64), (256, 64, 1)) assert_size_stride(arg1_1, (4, 4, 16, 4), (256, 64, 4, 1)) assert_size_stride(arg2_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(arg3_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf9 = empty_strided_cuda((), (), torch.float32) buf18 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_max_0[grid(1)](arg1_1, buf0, buf9, buf18, 1, 1024, num_warps=8, num_stages=1) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf12 = empty_strided_cuda((4, 
4), (4, 1), torch.float32) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_per_fused__to_copy_abs_bitwise_and_div_eq_gt_le_mul_relu_repeat_sub_sum_view_1[ grid(16)](arg1_1, arg3_1, buf0, arg2_1, buf9, arg0_1, buf18, buf1, buf3, buf2, buf4, buf10, buf12, buf11, buf13, buf19, buf21, buf20, buf22, 16, 64, XBLOCK=8, num_warps=4, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 buf5 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_add_div_mul_sum_2[grid(4)](buf1, buf2, buf3, buf4, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf1 del buf2 del buf3 del buf4 buf6 = torch.ops.aten.topk.default(buf5, 2) buf7 = buf6[0] del buf6 buf14 = buf5 del buf5 triton_poi_fused_add_div_mul_sum_2[grid(4)](buf10, buf11, buf12, buf13, buf14, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf10 del buf11 del buf12 del buf13 buf15 = torch.ops.aten.topk.default(buf14, 1) del buf14 buf16 = buf15[0] del buf15 buf26 = buf9 del buf9 triton_per_fused_mean_3[grid(1)](buf7, buf26, 1, 2, XBLOCK=1, num_warps=2, num_stages=1) del buf7 buf24 = buf18 del buf18 buf25 = buf24 del buf24 buf27 = buf26 del buf26 buf28 = reinterpret_tensor(buf16, (), (), 0) del buf16 buf29 = buf0 del buf0 triton_per_fused_add_div_mean_mul_sum_4[grid(1)](buf25, buf27, buf28, buf19, buf20, buf21, buf22, buf29, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf19 del buf20 del buf21 del buf22 return buf29, buf25, buf27, buf28 class VisErrorLossV3New(nn.Module): def __init__(self): super(VisErrorLossV3New, self).__init__() def compute_l1_weighted_loss(self, hm_targets, hm_preds, vismap, ohem=1.0): """ :param hm_targets: [batch size, keypoint number, h, w] :param hm_preds: [batch size, keypoint number, h, w] :param vismap: [batch size, keypoint number] :return: """ epsilon = 0.0001 hm_preds = F.relu(hm_preds, False) amplitude = torch.max(hm_targets) b, k, h, w = hm_targets.size() hm_targets = hm_targets.view(b, k, -1) hm_preds = hm_preds.view(b, k, -1) vismap = vismap.view(b, k, 1).repeat(1, 1, h * w) pos_ids = (hm_targets > amplitude / 10) & (vismap == 1) neg_ids = (hm_targets <= amplitude / 10) & (vismap == 1) diff = (hm_targets - hm_preds).abs() pos_loss = (diff * pos_ids.float()).sum(2).sum(0) / (pos_ids.float( ).sum(2).sum(0) + epsilon) neg_loss = (diff * neg_ids.float()).sum(2).sum(0) / (neg_ids.float( ).sum(2).sum(0) + epsilon) total_loss = 0.5 * pos_loss + 0.5 * neg_loss if ohem < 1: k = int(total_loss.size(0) * ohem) total_loss, _ = total_loss.topk(k) return total_loss.mean() def compute_l2_loss(self, hm_targets, hm_preds, vismap, ohem=1.0): """ :param hm_targets: [batch size, keypoint number, h, w] :param hm_preds: [batch size, keypoint number, h, w] :param vismap: [batch size, keypoint number] :return: """ epsilon = 0.0001 hm_preds = F.relu(hm_preds, False) b, k, h, w = hm_targets.size() hm_targets = hm_targets.view(b, k, -1) hm_preds = hm_preds.view(b, k, -1) vismap = vismap.view(b, k, 1).repeat(1, 1, h * w) ids = vismap == 1 diff = (hm_targets - hm_preds) ** 2 total_loss = (diff * ids.float()).sum(2).sum(0) / (ids.float().sum( 2).sum(0) + epsilon) if ohem < 1: k = int(total_loss.size(0) * ohem) total_loss, _ = total_loss.topk(k) return total_loss.mean() def forward(self, input_0, input_1, input_2, input_3): arg1_1 = 
input_0 arg0_1 = input_1 arg3_1 = input_2 arg2_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0], output[1], output[2], output[3]
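# Illustrative parity check (an addition, not from the original repo): compares
# the Inductor-fused path above against the eager reference. call() is
# hard-coded to CUDA device 0, and the 1e-5 tolerance is an assumption; reduction
# orders differ between the fused and eager paths, so exact equality is not expected.
import torch
inputs = [t.cuda() for t in get_inputs()]
eager_out = VisErrorLossV3()(*inputs)
fused_out = VisErrorLossV3New()(*inputs)
for e, f in zip(eager_out, fused_out):
    assert torch.allclose(e, f, atol=1e-5)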
gathierry/FashionAI-KeyPointsDetectionOfApparel
VisErrorLossV3
false
15,434
[ "Apache-2.0" ]
174
2e0942b42b4a9cd974cdddc151675738dc8a8cb4
https://github.com/gathierry/FashionAI-KeyPointsDetectionOfApparel/tree/2e0942b42b4a9cd974cdddc151675738dc8a8cb4
ClusterAssignment
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/qr/cqramfwzchcxgkcint325vw2hnfsaipzt53x3qxrfqbhcxae7edr.py # Topologically Sorted Source Nodes: [sub, pow_1, norm_squared, truediv, add, numerator, numerator_1, sum_2, truediv_2], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div, aten.add, aten.reciprocal, aten.mul] # Source node to ATen node mapping: # add => add # norm_squared => sum_1 # numerator => mul, reciprocal # numerator_1 => pow_2 # pow_1 => pow_1 # sub => sub # sum_2 => sum_2 # truediv => div # truediv_2 => div_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %primals_2), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2]), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 1.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 1.0), kwargs = {}) # %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1.0), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul, 1.0), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_2, [1], True), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_2, %sum_2), kwargs = {}) triton_poi_fused_add_div_mul_pow_reciprocal_sub_sum_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_reciprocal_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_reciprocal_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_pow_reciprocal_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp12 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp5 = tmp4 - tmp1 tmp6 = tmp5 * tmp5 tmp7 = tmp3 + tmp6 tmp9 = tmp8 - tmp1 tmp10 = tmp9 * tmp9 tmp11 = tmp7 + tmp10 tmp13 = tmp12 - tmp1 tmp14 = tmp13 * tmp13 tmp15 = tmp11 + tmp14 tmp16 = 1.0 tmp17 = tmp15 * tmp16 tmp18 = tmp17 + tmp16 tmp19 = tl.full([1], 1, tl.int32) tmp20 = tmp19 / tmp18 tmp21 = tmp20 * tmp16 tmp22 = tmp21 / tmp21 tl.store(in_out_ptr0 + (x2), tmp22, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [sub, pow_1, norm_squared, truediv, add, numerator, numerator_1, sum_2, truediv_2], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div, aten.add, aten.reciprocal, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_pow_reciprocal_sub_sum_0.run(buf1, primals_1, primals_2, 64, grid=grid(64), stream=stream0) return (buf1, primals_1, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
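# What the fused kernel above computes, restated eagerly (an illustrative sketch,
# not part of the original repo; the function name is made up). With alpha = 1.0
# the exponent (alpha + 1) / 2 is 1, so the kernel reduces to a normalized
# 1 / (1 + d^2) Student's t similarity.
def soft_assign_reference(batch, centers, alpha=1.0):
    d2 = ((batch.unsqueeze(1) - centers) ** 2).sum(2)   # squared distance to each centroid
    q = (1.0 / (1.0 + d2 / alpha)) ** ((alpha + 1) / 2)
    return q / q.sum(dim=1, keepdim=True)               # normalize over clusters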
import torch
import torch.nn as nn
from torch.nn import Parameter
from typing import Optional


class ClusterAssignment(nn.Module):

    def __init__(self, cluster_number: 'int', embedding_dimension: 'int',
            alpha: 'float'=1.0, cluster_centers: 'Optional[torch.Tensor]'=None
            ) ->None:
        """
        Module to handle the soft assignment; for a description see section 3.1.1
        in Xie/Girshick/Farhadi, where the Student's t-distribution is used to
        measure similarity between a feature vector and each cluster centroid.

        :param cluster_number: number of clusters
        :param embedding_dimension: embedding dimension of feature vectors
        :param alpha: parameter representing the degrees of freedom in the
        t-distribution, default 1.0
        :param cluster_centers: cluster centers to initialise, if None then use
        Xavier uniform
        """
        super(ClusterAssignment, self).__init__()
        self.embedding_dimension = embedding_dimension
        self.cluster_number = cluster_number
        self.alpha = alpha
        if cluster_centers is None:
            initial_cluster_centers = torch.zeros(self.cluster_number,
                self.embedding_dimension, dtype=torch.float)
            nn.init.xavier_uniform_(initial_cluster_centers)
        else:
            initial_cluster_centers = cluster_centers
        self.cluster_centers = Parameter(initial_cluster_centers)

    def forward(self, batch: 'torch.Tensor') ->torch.Tensor:
        """
        Compute the soft assignment for a batch of feature vectors, returning
        a batch of assignments for each cluster.

        :param batch: FloatTensor of [batch size, embedding dimension]
        :return: FloatTensor [batch size, number of clusters]
        """
        norm_squared = torch.sum((batch.unsqueeze(1) - self.cluster_centers
            ) ** 2, 2)
        numerator = 1.0 / (1.0 + norm_squared / self.alpha)
        power = float(self.alpha + 1) / 2
        numerator = numerator ** power
        return numerator / torch.sum(numerator, dim=1, keepdim=True)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'cluster_number': 4, 'embedding_dimension': 4}]
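# Illustrative usage (an addition, not from the original repo): with a 2-D
# batch the soft assignments form a distribution over clusters, so each row
# sums to one.
import torch
assignment = ClusterAssignment(cluster_number=4, embedding_dimension=4)
batch = torch.rand(8, 4)            # [batch size, embedding dimension]
q = assignment(batch)               # [batch size, cluster number]
assert torch.allclose(q.sum(dim=1), torch.ones(8))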
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn import Parameter
from typing import Optional

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_div_mul_pow_reciprocal_sub_sum_0(in_out_ptr0,
        in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp12 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp5 = tmp4 - tmp1
    tmp6 = tmp5 * tmp5
    tmp7 = tmp3 + tmp6
    tmp9 = tmp8 - tmp1
    tmp10 = tmp9 * tmp9
    tmp11 = tmp7 + tmp10
    tmp13 = tmp12 - tmp1
    tmp14 = tmp13 * tmp13
    tmp15 = tmp11 + tmp14
    tmp16 = 1.0
    tmp17 = tmp15 * tmp16
    tmp18 = tmp17 + tmp16
    tmp19 = tl.full([1], 1, tl.int32)
    tmp20 = tmp19 / tmp18
    tmp21 = tmp20 * tmp16
    tmp22 = tmp21 / tmp21
    tl.store(in_out_ptr0 + x2, tmp22, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 16, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_div_mul_pow_reciprocal_sub_sum_0[grid(64)](buf1,
            primals_1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1)
    return buf1, primals_1, primals_2


class ClusterAssignmentNew(nn.Module):

    def __init__(self, cluster_number: 'int', embedding_dimension: 'int',
            alpha: 'float'=1.0, cluster_centers: 'Optional[torch.Tensor]'=None
            ) ->None:
        """
        Module to handle the soft assignment; for a description see section 3.1.1
        in Xie/Girshick/Farhadi, where the Student's t-distribution is used to
        measure similarity between a feature vector and each cluster centroid.

        :param cluster_number: number of clusters
        :param embedding_dimension: embedding dimension of feature vectors
        :param alpha: parameter representing the degrees of freedom in the
        t-distribution, default 1.0
        :param cluster_centers: cluster centers to initialise, if None then use
        Xavier uniform
        """
        super(ClusterAssignmentNew, self).__init__()
        self.embedding_dimension = embedding_dimension
        self.cluster_number = cluster_number
        self.alpha = alpha
        if cluster_centers is None:
            initial_cluster_centers = torch.zeros(self.cluster_number,
                self.embedding_dimension, dtype=torch.float)
            nn.init.xavier_uniform_(initial_cluster_centers)
        else:
            initial_cluster_centers = cluster_centers
        self.cluster_centers = Parameter(initial_cluster_centers)

    def forward(self, input_0):
        primals_2 = self.cluster_centers
        primals_1 = input_0
        output = call([primals_1, primals_2])
        return output[0]
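# Illustrative parity check (an addition, not from the original repo): the fused
# call() above asserts a (4, 4, 4, 4) CUDA input, and the cluster centers are
# copied so both modules evaluate the same parameters; the 1e-6 tolerance is an
# assumption.
import torch
eager = ClusterAssignment(4, 4).cuda()
fused = ClusterAssignmentNew(4, 4).cuda()
fused.cluster_centers.data.copy_(eager.cluster_centers.data)
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), fused(x), atol=1e-6)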
giorgosVardakas/pt-dec
ClusterAssignment
false
15,435
[ "MIT" ]
200
c29b9634eb74c828efd9d2b87c613cdb0ddd1dd5
https://github.com/giorgosVardakas/pt-dec/tree/c29b9634eb74c828efd9d2b87c613cdb0ddd1dd5
SqueezeAndExcitationModule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/vm/cvmsgl3j6cbrs4yvta6w5vjhmu7huh75tkw6spu64gpnevd5ytcj.py # Topologically Sorted Source Nodes: [scale], Original ATen: [aten.mean] # Source node to ATen node mapping: # scale => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {}) triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + 
(3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/p5/cp5us67vtvgvci2fdnd64iojdyp4orzux4324j6czewko4qz35ue.py # Topologically Sorted Source Nodes: [scale_2], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # scale_2 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_out_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr0 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = 0.0 tmp8 = tmp6 <= tmp7 tl.store(in_out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp6, None) tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/az/cazousalzuqn73ciahz5izvogzu4ekcsktal4tthjvwjd3cqdayz.py # Topologically Sorted Source Nodes: [scale_3], Original ATen: [aten.convolution] # Source node to ATen node mapping: # scale_3 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_1, %primals_4, %primals_5, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/fs/cfs4qiqhjuq4rjjmrtzf2cjre423zu5vqtebwr5o6wc6rv52ml7a.py # Topologically Sorted Source Nodes: [scale_4, x], Original ATen: [aten.sigmoid, aten.mul] # Source node to ATen node mapping: # scale_4 => sigmoid # x => mul # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%squeeze_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_3 = async_compile.triton('triton_poi_fused_mul_sigmoid_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (1, ), (1, )) assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [scale], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_poi_fused_mean_0.run(primals_1, buf0, 4, grid=grid(4), stream=stream0) # Topologically Sorted Source Nodes: [scale_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 1), (0, 1, 0), 0), primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (1, 1, 1), (1, 1, 1)) buf2 = reinterpret_tensor(buf1, (1, 1), (1, 1), 0); del buf1 # reuse buf6 = empty_strided_cuda((1, 1), (1, 1), torch.bool) # Topologically Sorted Source Nodes: [scale_2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_3, buf6, 1, grid=grid(1), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [scale_3], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(reinterpret_tensor(buf2, (1, 1, 1), (0, 0, 0), 0), primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf3, (1, 4, 1), (4, 1, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [scale_3], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf4, primals_5, 4, grid=grid(4), stream=stream0) del primals_5 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [scale_4, x], Original ATen: [aten.sigmoid, aten.mul] triton_poi_fused_mul_sigmoid_3.run(primals_1, buf4, buf5, 16, grid=grid(16), stream=stream0) return (buf5, primals_1, primals_2, primals_4, reinterpret_tensor(buf0, (1, 4, 1), (4, 1, 1), 0), reinterpret_tensor(buf2, (1, 1, 1), (1, 1, 1), 0), buf4, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, 
repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class Swish(nn.Module):

    def __init__(self):
        super(Swish, self).__init__()

    def forward(self, x):
        return x * x.sigmoid()


class Conv1d(nn.Conv1d):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
            padding='same', dilation=1, groups=1, bias=True):
        super(Conv1d, self).__init__(in_channels=in_channels, out_channels=
            out_channels, kernel_size=kernel_size, stride=stride, padding=0,
            dilation=dilation, groups=groups, bias=bias, padding_mode='zeros')
        assert padding in ['valid', 'same', 'causal']
        if padding == 'valid':
            self.pre_padding = None
        elif padding == 'same':
            self.pre_padding = nn.ConstantPad1d(padding=((kernel_size - 1) //
                2, (kernel_size - 1) // 2), value=0)
        elif padding == 'causal':
            self.pre_padding = nn.ConstantPad1d(padding=(kernel_size - 1, 0
                ), value=0)
        self.noise = None
        self.vn_std = None

    def init_vn(self, vn_std):
        self.vn_std = vn_std

    def sample_synaptic_noise(self, distributed):
        self.noise = torch.normal(mean=0.0, std=1.0, size=self.weight.size(
            ), device=self.weight.device, dtype=self.weight.dtype)
        if distributed:
            torch.distributed.broadcast(self.noise, 0)

    def forward(self, input):
        weight = self.weight
        if self.noise is not None and self.training:
            weight = weight + self.vn_std * self.noise
        if self.pre_padding is not None:
            input = self.pre_padding(input)
        return F.conv1d(input, weight, self.bias, self.stride, self.padding,
            self.dilation, self.groups)


class SqueezeAndExcitationModule(nn.Module):
    """Squeeze And Excitation Module

    Args:
        input_dim: input feature dimension
        reduction_ratio: bottleneck reduction ratio
        inner_act: bottleneck inner activation function

    Input: (batch_size, in_dim, in_length)
    Output: (batch_size, out_dim, out_length)
    """

    def __init__(self, input_dim, reduction_ratio, inner_act='relu'):
        super(SqueezeAndExcitationModule, self).__init__()
        assert input_dim % reduction_ratio == 0
        self.conv1 = Conv1d(input_dim, input_dim // reduction_ratio,
            kernel_size=1)
        self.conv2 = Conv1d(input_dim // reduction_ratio, input_dim,
            kernel_size=1)
        assert inner_act in ['relu', 'swish']
        if inner_act == 'relu':
            self.inner_act = nn.ReLU()
        elif inner_act == 'swish':
            self.inner_act = Swish()

    def forward(self, x):
        scale = x.mean(dim=-1, keepdim=True)
        scale = self.conv1(scale)
        scale = self.inner_act(scale)
        scale = self.conv2(scale)
        scale = scale.sigmoid()
        x = x * scale
        return x


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4, 'reduction_ratio': 4}]
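# Illustrative usage (an addition, not from the original repo): the documented
# (batch_size, in_dim, in_length) layout; SE rescales each channel by a squeezed
# sigmoid gate, so the output shape matches the input. The dimensions here are
# made up for the example.
import torch
se = SqueezeAndExcitationModule(input_dim=64, reduction_ratio=8)
x = torch.rand(2, 64, 100)
y = se(x)
assert y.shape == x.shape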
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_out_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr0 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = 0.0 tmp8 = tmp6 <= tmp7 tl.store(in_out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp6, None) tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp8, None) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(4)](primals_1, buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 1 ), (0, 1, 0), 0), primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (1, 1, 1), (1, 1, 1)) buf2 = reinterpret_tensor(buf1, (1, 1), (1, 1), 0) del buf1 buf6 = empty_strided_cuda((1, 1), (1, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(1)](buf2, 
primals_3, buf6, 1, XBLOCK=1, num_warps=1, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(reinterpret_tensor(buf2, (1, 1, 1 ), (0, 0, 0), 0), primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf3, (1, 4, 1), (4, 1, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_2[grid(4)](buf4, primals_5, 4, XBLOCK= 4, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_sigmoid_3[grid(16)](primals_1, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf5, primals_1, primals_2, primals_4, reinterpret_tensor(buf0, (1, 4, 1), (4, 1, 1), 0), reinterpret_tensor(buf2, (1, 1, 1), (1, 1, 1), 0), buf4, buf6 class Swish(nn.Module): def __init__(self): super(Swish, self).__init__() def forward(self, x): return x * x.sigmoid() class Conv1d(nn.Conv1d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding='same', dilation=1, groups=1, bias=True): super(Conv1d, self).__init__(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, padding=0, dilation=dilation, groups=groups, bias=bias, padding_mode='zeros') assert padding in ['valid', 'same', 'causal'] if padding == 'valid': self.pre_padding = None elif padding == 'same': self.pre_padding = nn.ConstantPad1d(padding=((kernel_size - 1) // 2, (kernel_size - 1) // 2), value=0) elif padding == 'causal': self.pre_padding = nn.ConstantPad1d(padding=(kernel_size - 1, 0 ), value=0) self.noise = None self.vn_std = None def init_vn(self, vn_std): self.vn_std = vn_std def sample_synaptic_noise(self, distributed): self.noise = torch.normal(mean=0.0, std=1.0, size=self.weight.size( ), device=self.weight.device, dtype=self.weight.dtype) if distributed: torch.distributed.broadcast(self.noise, 0) def forward(self, input): weight = self.weight if self.noise is not None and self.training: weight = weight + self.vn_std * self.noise if self.pre_padding is not None: input = self.pre_padding(input) return F.conv1d(input, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) class SqueezeAndExcitationModuleNew(nn.Module): """Squeeze And Excitation Module Args: input_dim: input feature dimension reduction_ratio: bottleneck reduction ratio inner_act: bottleneck inner activation function Input: (batch_size, in_dim, in_length) Output: (batch_size, out_dim, out_length) """ def __init__(self, input_dim, reduction_ratio, inner_act='relu'): super(SqueezeAndExcitationModuleNew, self).__init__() assert input_dim % reduction_ratio == 0 self.conv1 = Conv1d(input_dim, input_dim // reduction_ratio, kernel_size=1) self.conv2 = Conv1d(input_dim // reduction_ratio, input_dim, kernel_size=1) assert inner_act in ['relu', 'swish'] if inner_act == 'relu': self.inner_act = nn.ReLU() elif inner_act == 'swish': self.inner_act = Swish() def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
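# Illustrative parity check (an addition, not from the original repo): the
# compiled call() above asserts the (4, 4) input from get_inputs(); weights are
# copied so both paths share parameters. Assumes CUDA and a PyTorch version
# whose conv1d accepts the unbatched 2-D input; the 1e-6 tolerance is an
# assumption.
import torch
eager = SqueezeAndExcitationModule(4, 4).cuda()
fused = SqueezeAndExcitationModuleNew(4, 4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, device='cuda')
assert torch.allclose(eager(x), fused(x), atol=1e-6)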
gheyret/EfficientConformer
SqueezeAndExcitationModule
false
15,436
[ "Apache-2.0" ]
101
b28a0aaa3b182f72abaccbeb12df0402adf96097
https://github.com/gheyret/EfficientConformer/tree/b28a0aaa3b182f72abaccbeb12df0402adf96097
_Extraction
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/cn/ccn2cwknojpjzd5soqlzd7uat2rgmnkfagxccgoncep2feayxcy4.py # Topologically Sorted Source Nodes: [max_1, start_pos, end_pos], Original ATen: [aten.max, aten.floor_divide, aten.remainder] # Source node to ATen node mapping: # end_pos => remainder # max_1 => max_1 # start_pos => div # Graph fragment: # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view, -1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor_mode](args = (%getitem_1, 4), kwargs = {rounding_mode: floor}) # %remainder : [num_users=1] = call_function[target=torch.ops.aten.remainder.Scalar](args = (%getitem_1, 4), kwargs = {}) triton_per_fused_floor_divide_max_remainder_0 = async_compile.triton('triton_per_fused_floor_divide_max_remainder_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i64', 4: '*i64', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_floor_divide_max_remainder_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_floor_divide_max_remainder_0(in_ptr0, in_ptr1, in_ptr2, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp10 = tl.load(in_ptr0 + ((4*x0) + (r1 // 4)), xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.load(in_ptr1 + ((4*x0) + (r1 // 4)), xmask, eviction_policy='evict_last', other=0.0) tmp17 = tl.load(in_ptr0 + ((4*x0) + (r1 % 4)), xmask, other=0.0) tmp20 = tl.load(in_ptr2 + ((4*x0) + (r1 % 4)), xmask, other=0.0) tmp0 = ((-1)*(r1 // 4)) + (r1 % 4) tmp1 = tl.full([1, 1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = tmp5 == tmp3 tmp7 = tmp6.to(tl.float32) tmp8 = (tmp7 != 0) tmp9 = tmp8 == 0 tmp11 = 4.0 tmp12 = tmp10 != tmp11 tmp13 = tmp12 == 0 tmp15 = float("-inf") tmp16 = tl.where(tmp13, tmp15, tmp14) tmp18 = tmp17 != tmp11 tmp19 = tmp18 == 0 tmp21 = tl.where(tmp19, tmp15, tmp20) tmp22 = tmp16 + tmp21 tmp23 = tl.where(tmp9, tmp15, tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.where(xmask, tmp24, float("-inf")) tmp27 = tl.broadcast_to(rindex, tmp26.shape) _, tmp25_tmp = triton_helpers.max_with_index(tmp26, tmp27, 1) tmp25 = tmp25_tmp[:, None] tmp28 = tl.full([1, 1], 4, tl.int64) tmp29 = tl.where((tmp25 < 0) != (tmp28 < 0), tl.where(tmp25 % tmp28 != 0, tmp25 // tmp28 - 1, tmp25 // tmp28), tmp25 // tmp28) tmp30 = tmp25 % tmp28 tmp31 = tl.full([1, 1], 0, tl.int32) tmp32 = tmp30 != tmp31 tmp33 = libdevice.signbit(tmp30) if (tmp30).dtype is tl.float32 else tmp30 < 0 tmp34 = libdevice.signbit(tmp28) if (tmp28).dtype is tl.float32 else tmp28 < 0 tmp35 = tmp33 != tmp34 tmp36 = tmp32 & tmp35 tmp37 = tmp30 + tmp28 tmp38 = tl.where(tmp36, tmp37, tmp30) tl.store(out_ptr1 + (x0), tmp29, xmask) tl.store(out_ptr2 + (x0), tmp38, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, ), (1, ), torch.int64) buf3 = empty_strided_cuda((4, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [max_1, start_pos, end_pos], Original ATen: [aten.max, aten.floor_divide, aten.remainder] stream0 = get_raw_stream(0) triton_per_fused_floor_divide_max_remainder_0.run(arg0_1, arg1_1, arg2_1, buf2, buf3, 4, 16, grid=grid(4), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (buf2, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
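The sign-fixing sequence in tmp29 through tmp38 above is how Inductor lowers Python's floor division and remainder (round toward negative infinity; remainder takes the divisor's sign) onto truncating integer hardware. In this particular kernel the dividend is an argmax index (always non-negative) and the divisor is the sequence length 4, so the correction branches never fire; the compiler emits them unconditionally for correctness. A minimal restatement in plain Python, built only from truncating primitives; all function names here are illustrative, not from the generated code:

def trunc_div(a, b):
    # C-style integer division: truncates toward zero, as hardware does.
    q = abs(a) // abs(b)
    return -q if (a < 0) != (b < 0) else q

def trunc_mod(a, b):
    # C-style remainder: takes the sign of the dividend.
    return a - trunc_div(a, b) * b

def floor_div(a, b):
    # Mirrors tmp29: step the truncated quotient back by one when the
    # operands' signs differ and the division is inexact.
    q, r = trunc_div(a, b), trunc_mod(a, b)
    return q - 1 if r != 0 and (a < 0) != (b < 0) else q

def py_mod(a, b):
    # Mirrors tmp30..tmp38: fold the truncated remainder into the
    # divisor's sign range by adding b when the signs differ.
    r = trunc_mod(a, b)
    return r + b if r != 0 and (r < 0) != (b < 0) else r

# Exhaustive check against Python's own floor semantics.
assert all(floor_div(a, b) == a // b and py_mod(a, b) == a % b
           for a in range(-9, 10) for b in (-4, -3, 3, 4))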
import torch from torch import Tensor import torch.onnx.operators def create_max_segment_mask(tensor: 'Tensor', max_segment_length): """ Create max-segment mask. Args: tensor: :math: (N, T, *) where T is target dimension Returns: - max-segment mask: :math:`(N, T)` where T is target dimension """ sz = tensor.size(1) mask = [[(i <= j < i + max_segment_length) for j in range(sz)] for i in range(sz)] mask = torch.BoolTensor(mask).type_as(tensor).bool() return mask def create_upper_triangular_mask(tensor: 'Tensor'): """ Create upper triangular mask. It is usually used in auto-regressive model in training Args: tensor: :math: (N, T, *) where T is target dimension Returns: - upper triangular mask: :math:`(N, T)` where T is target dimension """ sz = tensor.size(1) mask = (torch.triu(torch.ones(sz, sz)) == 1).type_as(tensor).bool() return mask.detach() class _Extraction(torch.nn.Module): """ Extraction methods transform a pair of start and end position to a segment of context. Args: pad: pad index max_segment_length: maximum length for extracted results """ def __init__(self, pad, max_segment_length=None): super().__init__() self._pad = pad self._max_segment_length = max_segment_length def forward(self, context, start_logits, end_logits): """ Extract a piece of content from context Args: context: whole context for extraction start_logits: log probability of start position end_logits: log probability of end position Returns: - an extracted sequence of maximum probability """ attention_mask = context.ne(self._pad) start_logits = start_logits.masked_fill(~attention_mask, float('-inf')) end_logits = end_logits.masked_fill(~attention_mask, float('-inf')) batch_size, seqlen = context.size() logits = start_logits.unsqueeze(dim=2) + end_logits.unsqueeze(dim=1) mask = create_upper_triangular_mask(context) if self._max_segment_length: max_segment_mask = create_max_segment_mask(context, self. _max_segment_length) mask = mask & max_segment_mask logits = logits.masked_fill(~mask, float('-inf')) logits = logits.view(batch_size, seqlen * seqlen) _, pos = logits.max(dim=-1) start_pos, end_pos = pos // seqlen, pos % seqlen return start_pos, end_pos def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'pad': 4}]
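A minimal eager-mode run of the module above, assuming its definition and the get_inputs/get_init_inputs helpers are in scope. With pad=4 and random float inputs in [0, 1), no position ever equals the pad index, so the attention mask is all ones and only the upper-triangular constraint matters:

import torch

extractor = _Extraction(pad=4)
context, start_logits, end_logits = get_inputs()

start_pos, end_pos = extractor(context, start_logits, end_logits)
# One (start, end) pair per batch row; because logits at positions with
# end < start were filled with -inf, the decode always satisfies this:
assert bool((start_pos <= end_pos).all())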
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import Tensor import torch.onnx.operators assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_floor_divide_max_remainder_0(in_ptr0, in_ptr1, in_ptr2, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp10 = tl.load(in_ptr0 + (4 * x0 + r1 // 4), xmask, eviction_policy= 'evict_last', other=0.0) tmp14 = tl.load(in_ptr1 + (4 * x0 + r1 // 4), xmask, eviction_policy= 'evict_last', other=0.0) tmp17 = tl.load(in_ptr0 + (4 * x0 + r1 % 4), xmask, other=0.0) tmp20 = tl.load(in_ptr2 + (4 * x0 + r1 % 4), xmask, other=0.0) tmp0 = -1 * (r1 // 4) + r1 % 4 tmp1 = tl.full([1, 1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = 1.0 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = tmp5 == tmp3 tmp7 = tmp6.to(tl.float32) tmp8 = tmp7 != 0 tmp9 = tmp8 == 0 tmp11 = 4.0 tmp12 = tmp10 != tmp11 tmp13 = tmp12 == 0 tmp15 = float('-inf') tmp16 = tl.where(tmp13, tmp15, tmp14) tmp18 = tmp17 != tmp11 tmp19 = tmp18 == 0 tmp21 = tl.where(tmp19, tmp15, tmp20) tmp22 = tmp16 + tmp21 tmp23 = tl.where(tmp9, tmp15, tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.where(xmask, tmp24, float('-inf')) tmp27 = tl.broadcast_to(rindex, tmp26.shape) _, tmp25_tmp = triton_helpers.max_with_index(tmp26, tmp27, 1) tmp25 = tmp25_tmp[:, None] tmp28 = tl.full([1, 1], 4, tl.int64) tmp29 = tl.where((tmp25 < 0) != (tmp28 < 0), tl.where(tmp25 % tmp28 != 0, tmp25 // tmp28 - 1, tmp25 // tmp28), tmp25 // tmp28) tmp30 = tmp25 % tmp28 tmp31 = tl.full([1, 1], 0, tl.int32) tmp32 = tmp30 != tmp31 tmp33 = libdevice.signbit(tmp30 ) if tmp30.dtype is tl.float32 else tmp30 < 0 tmp34 = libdevice.signbit(tmp28 ) if tmp28.dtype is tl.float32 else tmp28 < 0 tmp35 = tmp33 != tmp34 tmp36 = tmp32 & tmp35 tmp37 = tmp30 + tmp28 tmp38 = tl.where(tmp36, tmp37, tmp30) tl.store(out_ptr1 + x0, tmp29, xmask) tl.store(out_ptr2 + x0, tmp38, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4,), (1,), torch.int64) buf3 = empty_strided_cuda((4,), (1,), torch.int64) get_raw_stream(0) triton_per_fused_floor_divide_max_remainder_0[grid(4)](arg0_1, arg1_1, arg2_1, buf2, buf3, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, buf3 def create_max_segment_mask(tensor: 'Tensor', max_segment_length): """ Create max-segment mask. Args: tensor: :math: (N, T, *) where T is target dimension Returns: - max-segment mask: :math:`(N, T)` where T is target dimension """ sz = tensor.size(1) mask = [[(i <= j < i + max_segment_length) for j in range(sz)] for i in range(sz)] mask = torch.BoolTensor(mask).type_as(tensor).bool() return mask def create_upper_triangular_mask(tensor: 'Tensor'): """ Create upper triangular mask. 
It is usually used in auto-regressive model in training Args: tensor: :math: (N, T, *) where T is target dimension Returns: - upper triangular mask: :math:`(N, T)` where T is target dimension """ sz = tensor.size(1) mask = (torch.triu(torch.ones(sz, sz)) == 1).type_as(tensor).bool() return mask.detach() class _ExtractionNew(torch.nn.Module): """ Extraction methods transform a pair of start and end position to a segment of context. Args: pad: pad index max_segment_length: maximum length for extracted results """ def __init__(self, pad, max_segment_length=None): super().__init__() self._pad = pad self._max_segment_length = max_segment_length def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
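Assuming the eager _Extraction and the compiled _ExtractionNew above are loaded into one CUDA session (in this dataset they live in separate string fields), a parity check is straightforward; results should agree exactly, up to argmax tie-breaking, which random float inputs make vanishingly unlikely. A sketch, not part of the original repo:

import torch

ref = _Extraction(pad=4)
new = _ExtractionNew(pad=4)
inputs = [t.cuda() for t in get_inputs()]  # the Triton path is CUDA-only

ref_start, ref_end = ref(*inputs)
new_start, new_end = new(*inputs)
# Both paths pick the same flattened argmax cell and decode it the same way.
assert torch.equal(ref_start, new_start) and torch.equal(ref_end, new_end)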
godweiyang/ParaGen
_Extraction
false
15437
[ "Apache-2.0" ]
50
9665d1244ea38a41fc06b4e0a7f6411985e2221f
https://github.com/godweiyang/ParaGen/tree/9665d1244ea38a41fc06b4e0a7f6411985e2221f
MultiHeadLinearAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/o6/co6c5rr7xc3wbbuyu2n72ozco6mt4xac5xlyyvjr6k7hufsjcbzn.py # Topologically Sorted Source Nodes: [truediv, softmax], Original ATen: [aten.div, aten._softmax] # Source node to ATen node mapping: # softmax => amax, clone, div_1, exp, sub, sum_1 # truediv => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_4, 1.0), kwargs = {}) # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%div,), kwargs = {memory_format: torch.contiguous_format}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [-2], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-2], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_div_0 = async_compile.triton('triton_per_fused__softmax_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_div_0(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4 x1 = (xindex // 4) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*r2) + (64*x1)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, float("-inf")) tmp8 = triton_helpers.max2(tmp7, 1)[:, None] tmp9 = tmp4 - tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tmp10 / tmp14 tl.store(out_ptr2 + (r2 + (16*x3)), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/a3/ca3c423eelt6wuklohapyjrb6r2xhk3je6u3r7sngu36te4ofgnb.py # Topologically Sorted Source Nodes: [KV], Original ATen: [aten.clone] # Source node to ATen node mapping: # KV => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK 
yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask) tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/y6/cy66rg3jcbi5u6bq26rj7j2gfban2ubmmr7v5btaluhh3qro5m4m.py # Topologically Sorted Source Nodes: [truediv_1, softmax_1], Original ATen: [aten.div, aten._softmax] # Source node to ATen node mapping: # softmax_1 => amax_1, clone_2, div_3, exp_1, sub_1, sum_2 # truediv_1 => div_2 # Graph fragment: # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_3, 1.0), kwargs = {}) # %clone_2 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%div_2,), kwargs = {memory_format: torch.contiguous_format}) # %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone_2, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone_2, %amax_1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {}) # %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {}) triton_poi_fused__softmax_div_2 = async_compile.triton('triton_poi_fused__softmax_div_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_div_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask 
= xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 16 y1 = (yindex // 16) tmp0 = tl.load(in_ptr0 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tmp4 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp6 / tmp6 tl.store(out_ptr0 + (y0 + (16*x2) + (64*y1)), tmp7, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/2s/c2s3zo6qtbodb6bdwv46ozxj4nxxymp76igm7emvdafvrj3673sn.py # Topologically Sorted Source Nodes: [O_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # O_2 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_15,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = (yindex // 16) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/5a/c5ainvzw5uinosmtjpfsqml2wdleittg5twoainmhbtwdqayr4sw.py # Topologically Sorted Source Nodes: [O_2], Original ATen: [aten.add] # Source node to ATen node mapping: # O_2 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_11), kwargs = {}) triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf5 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [truediv, softmax], Original ATen: [aten.div, aten._softmax] stream0 = get_raw_stream(0) triton_per_fused__softmax_div_0.run(buf1, primals_5, buf5, 16, 16, grid=grid(16), stream=stream0) del primals_5 buf6 = 
reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [KV], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf2, primals_8, buf6, 16, 16, grid=grid(16, 16), stream=stream0) del primals_8 buf7 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [KV], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf5, (16, 1, 16), (16, 16, 1), 0), reinterpret_tensor(buf6, (16, 16, 1), (16, 1, 0), 0), out=buf7) buf8 = reinterpret_tensor(buf2, (4, 4, 16, 1), (64, 16, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [truediv_1, softmax_1], Original ATen: [aten.div, aten._softmax] triton_poi_fused__softmax_div_2.run(buf0, primals_3, buf8, 64, 4, grid=grid(64, 4), stream=stream0) del primals_3 buf9 = reinterpret_tensor(buf0, (16, 16, 1), (16, 1, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [O], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf8, (16, 16, 1), (16, 1, 1), 0), buf7, out=buf9) buf10 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [O_2], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf9, buf10, 64, 4, grid=grid(64, 4), stream=stream0) buf11 = reinterpret_tensor(buf9, (64, 4), (4, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [O_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf10, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf11) buf12 = reinterpret_tensor(buf11, (4, 16, 4), (64, 4, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [O_2], Original ATen: [aten.add] triton_poi_fused_add_4.run(buf12, primals_11, 256, grid=grid(256), stream=stream0) del primals_11 return (buf12, reinterpret_tensor(buf7, (4, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), buf5, buf8, reinterpret_tensor(buf10, (64, 4), (4, 1), 0), primals_10, buf7, reinterpret_tensor(buf6, (16, 1, 16), (16, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', 
benchmark_compiled_module)
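Two things worth noting about the generated code above. First, with dim_model=4 and num_heads=4 the head dimension is 1, so the query-side softmax runs over a singleton axis; that is why triton_poi_fused__softmax_div_2 reduces to exp(x - x) / exp(x - x), identically 1.0: the compiler specialized to these exact shapes. Second, the file doubles as a micro-benchmark via benchmark_compiled_module. A sketch of driving it programmatically, where the file name mhla_compiled.py is hypothetical (the dataset stores this code only as a string field):

# Save the generated code as mhla_compiled.py (hypothetical name), then:
import mhla_compiled

# times/repeat mirror the defaults of the generated harness.
mhla_compiled.benchmark_compiled_module(times=10, repeat=10)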
import torch import torch.nn as nn import torch.nn.functional as F class Linear(nn.Linear): def __init__(self, in_features, out_features, bias=True): super(Linear, self).__init__(in_features=in_features, out_features= out_features, bias=bias) self.noise = None self.vn_std = None def init_vn(self, vn_std): self.vn_std = vn_std def sample_synaptic_noise(self, distributed): self.noise = torch.normal(mean=0.0, std=1.0, size=self.weight.size( ), device=self.weight.device, dtype=self.weight.dtype) if distributed: torch.distributed.broadcast(self.noise, 0) def forward(self, input): weight = self.weight if self.noise is not None and self.training: weight = weight + self.vn_std * self.noise return F.linear(input, weight, self.bias) class MultiHeadAttention(nn.Module): """Mutli-Head Attention Layer Args: dim_model: model feature dimension num_heads: number of attention heads References: Attention Is All You Need, Vaswani et al. https://arxiv.org/abs/1706.03762 """ def __init__(self, dim_model, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads = num_heads self.dim_model = dim_model self.dim_head = dim_model // num_heads self.query_layer = Linear(self.dim_model, self.dim_model) self.key_layer = Linear(self.dim_model, self.dim_model) self.value_layer = Linear(self.dim_model, self.dim_model) self.output_layer = Linear(self.dim_model, self.dim_model) def forward(self, Q, K, V, mask=None): """Scaled Dot-Product Multi-Head Attention Args: Q: Query of shape (B, T, D) K: Key of shape (B, T, D) V: Value of shape (B, T, D) mask: Optional position mask of shape (1 or B, 1 or H, 1 or T, 1 or T) Return: O: Attention output of shape (B, T, D) att_w: Attention weights of shape (B, H, T, T) """ batch_size = Q.size(0) Q = self.query_layer(Q) K = self.key_layer(K) V = self.value_layer(V) Q = Q.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose( 1, 2) K = K.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose( 1, 2) V = V.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose( 1, 2) att_scores = Q.matmul(K.transpose(2, 3)) / K.shape[-1] ** 0.5 if mask is not None: att_scores += mask * -1000000000.0 att_w = att_scores.softmax(dim=-1) O = att_w.matmul(V) O = O.transpose(1, 2).reshape(batch_size, -1, self.dim_model) O = self.output_layer(O) return O, att_w.detach() def pad(self, Q, K, V, mask, chunk_size): overflow_Q = Q.size(1) % chunk_size overflow_KV = K.size(1) % chunk_size padding_Q = chunk_size - overflow_Q if overflow_Q else 0 padding_KV = chunk_size - overflow_KV if overflow_KV else 0 batch_size, seq_len_KV, _ = K.size() Q = F.pad(Q, (0, 0, 0, padding_Q), value=0) K = F.pad(K, (0, 0, 0, padding_KV), value=0) V = F.pad(V, (0, 0, 0, padding_KV), value=0) if mask is not None: if mask.size(2) == 1: mask = F.pad(mask, pad=(0, padding_KV), value=1) else: mask = F.pad(mask, pad=(0, padding_Q, 0, padding_KV), value=1) elif padding_KV: mask = F.pad(Q.new_zeros(batch_size, 1, 1, seq_len_KV), pad=(0, padding_KV), value=1) return Q, K, V, mask, padding_Q class MultiHeadLinearAttention(MultiHeadAttention): """Multi-Head Linear Attention Args: dim_model: model feature dimension num_heads: number of attention heads References: Efficient Attention: Attention with Linear Complexities, Shen et al. https://arxiv.org/abs/1812.01243 Efficient conformer-based speech recognition with linear attention, Li et al. 
https://arxiv.org/abs/2104.06865 """ def __init__(self, dim_model, num_heads): super(MultiHeadLinearAttention, self).__init__(dim_model, num_heads) def forward(self, Q, K, V): batch_size = Q.size(0) Q = self.query_layer(Q) K = self.key_layer(K) V = self.value_layer(V) Q = Q.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose( 1, 2) K = K.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose( 1, 2) V = V.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose( 1, 2) KV = (K / K.shape[-1] ** (1.0 / 4.0)).softmax(dim=-2).transpose(2, 3 ).matmul(V) O = (Q / Q.shape[-1] ** (1.0 / 4.0)).softmax(dim=-1).matmul(KV) O = O.transpose(1, 2).reshape(batch_size, -1, self.dim_model) O = self.output_layer(O) return O, KV.detach() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_model': 4, 'num_heads': 4}]
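The point of the reassociation in the forward above is complexity: standard attention forms a (T, T) score matrix per head, while the linear variant normalizes Q and K separately and multiplies K^T V first, so only a (d, d) matrix is ever materialized. A shape-level sketch (the two operators are different approximations and are not numerically equal):

import torch

B, H, T, d = 2, 4, 128, 16
Q, K, V = (torch.randn(B, H, T, d) for _ in range(3))

# Standard attention: O(T^2 * d) time, (B, H, T, T) scores.
att = ((Q @ K.transpose(-2, -1)) / d ** 0.5).softmax(dim=-1) @ V

# Linear attention as written above: O(T * d^2) time, (B, H, d, d) state.
KV = (K / d ** 0.25).softmax(dim=-2).transpose(-2, -1) @ V
O = (Q / d ** 0.25).softmax(dim=-1) @ KV
assert att.shape == O.shape == (B, H, T, d)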
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__softmax_div_0(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4 x1 = xindex // 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, float('-inf')) tmp8 = triton_helpers.max2(tmp7, 1)[:, None] tmp9 = tmp4 - tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tmp10 / tmp14 tl.store(out_ptr2 + (r2 + 16 * x3), tmp15, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_div_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 16 y1 = yindex // 16 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tmp4 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp6 / tmp6 tl.store(out_ptr0 + (y0 + 16 * x2 + 64 * y1), tmp7, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf5 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32) get_raw_stream(0) triton_per_fused__softmax_div_0[grid(16)](buf1, primals_5, buf5, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_5 buf6 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0) del buf1 triton_poi_fused_clone_1[grid(16, 16)](buf2, primals_8, buf6, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_8 buf7 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf5, (16, 1, 16), (16, 16, 1 ), 0), reinterpret_tensor(buf6, (16, 16, 1), (16, 1, 0), 0), out=buf7) buf8 = reinterpret_tensor(buf2, (4, 4, 16, 1), (64, 16, 1, 1), 0) del buf2 triton_poi_fused__softmax_div_2[grid(64, 4)](buf0, primals_3, buf8, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf9 = reinterpret_tensor(buf0, (16, 16, 1), (16, 1, 1), 0) del buf0 extern_kernels.bmm(reinterpret_tensor(buf8, (16, 16, 1), (16, 1, 1), 0), buf7, out=buf9) buf10 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(64, 4)](buf9, buf10, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf9, (64, 4), (4, 1), 0) del buf9 extern_kernels.mm(reinterpret_tensor(buf10, (64, 4), (4, 1), 0), 
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf11) buf12 = reinterpret_tensor(buf11, (4, 16, 4), (64, 4, 1), 0) del buf11 triton_poi_fused_add_4[grid(256)](buf12, primals_11, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_11 return buf12, reinterpret_tensor(buf7, (4, 4, 1, 1), (4, 1, 1, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0 ), buf5, buf8, reinterpret_tensor(buf10, (64, 4), (4, 1), 0 ), primals_10, buf7, reinterpret_tensor(buf6, (16, 1, 16), (16, 1, 1), 0) class Linear(nn.Linear): def __init__(self, in_features, out_features, bias=True): super(Linear, self).__init__(in_features=in_features, out_features= out_features, bias=bias) self.noise = None self.vn_std = None def init_vn(self, vn_std): self.vn_std = vn_std def sample_synaptic_noise(self, distributed): self.noise = torch.normal(mean=0.0, std=1.0, size=self.weight.size( ), device=self.weight.device, dtype=self.weight.dtype) if distributed: torch.distributed.broadcast(self.noise, 0) def forward(self, input): weight = self.weight if self.noise is not None and self.training: weight = weight + self.vn_std * self.noise return F.linear(input, weight, self.bias) class MultiHeadAttention(nn.Module): """Mutli-Head Attention Layer Args: dim_model: model feature dimension num_heads: number of attention heads References: Attention Is All You Need, Vaswani et al. https://arxiv.org/abs/1706.03762 """ def __init__(self, dim_model, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads = num_heads self.dim_model = dim_model self.dim_head = dim_model // num_heads self.query_layer = Linear(self.dim_model, self.dim_model) self.key_layer = Linear(self.dim_model, self.dim_model) self.value_layer = Linear(self.dim_model, self.dim_model) self.output_layer = Linear(self.dim_model, self.dim_model) def forward(self, Q, K, V, mask=None): """Scaled Dot-Product Multi-Head Attention Args: Q: Query of shape (B, T, D) K: Key of shape (B, T, D) V: Value of shape (B, T, D) mask: Optional position mask of shape (1 or B, 1 or H, 1 or T, 1 or T) Return: O: Attention output of shape (B, T, D) att_w: Attention weights of shape (B, H, T, T) """ batch_size = Q.size(0) Q = self.query_layer(Q) K = self.key_layer(K) V = self.value_layer(V) Q = Q.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose( 1, 2) K = K.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose( 1, 2) V = V.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose( 1, 2) att_scores = Q.matmul(K.transpose(2, 3)) / K.shape[-1] ** 0.5 if mask is not None: att_scores += mask * -1000000000.0 att_w = att_scores.softmax(dim=-1) O = att_w.matmul(V) O = O.transpose(1, 2).reshape(batch_size, -1, self.dim_model) O = self.output_layer(O) return O, att_w.detach() def pad(self, Q, K, V, mask, chunk_size): overflow_Q = Q.size(1) % chunk_size overflow_KV = K.size(1) % chunk_size padding_Q = chunk_size - overflow_Q if overflow_Q else 0 padding_KV = chunk_size - overflow_KV if overflow_KV else 0 batch_size, seq_len_KV, _ = K.size() Q = F.pad(Q, (0, 0, 0, padding_Q), value=0) K = F.pad(K, (0, 0, 0, padding_KV), value=0) V = F.pad(V, (0, 0, 0, padding_KV), value=0) if mask is not None: if mask.size(2) == 1: mask = F.pad(mask, pad=(0, padding_KV), value=1) else: mask = F.pad(mask, pad=(0, padding_Q, 0, padding_KV), value=1) elif padding_KV: mask = F.pad(Q.new_zeros(batch_size, 1, 1, seq_len_KV), pad=(0, padding_KV), value=1) return Q, K, V, 
mask, padding_Q class MultiHeadLinearAttentionNew(MultiHeadAttention): """Multi-Head Linear Attention Args: dim_model: model feature dimension num_heads: number of attention heads References: Efficient Attention: Attention with Linear Complexities, Shen et al. https://arxiv.org/abs/1812.01243 Efficient conformer-based speech recognition with linear attention, Li et al. https://arxiv.org/abs/2104.06865 """ def __init__(self, dim_model, num_heads): super(MultiHeadLinearAttentionNew, self).__init__(dim_model, num_heads) def forward(self, input_0, input_1, input_2): primals_2 = self.query_layer.weight primals_3 = self.query_layer.bias primals_4 = self.key_layer.weight primals_5 = self.key_layer.bias primals_7 = self.value_layer.weight primals_8 = self.value_layer.bias primals_10 = self.output_layer.weight primals_11 = self.output_layer.bias primals_1 = input_0 primals_6 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0], output[1]
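A parity sketch between the eager module and the compiled wrapper above, assuming both definitions are in scope on a CUDA machine. The two classes share the same parameter names (both inherit from MultiHeadAttention), so weights can be copied with load_state_dict:

import torch

eager = MultiHeadLinearAttention(dim_model=4, num_heads=4).cuda()
compiled = MultiHeadLinearAttentionNew(dim_model=4, num_heads=4).cuda()
compiled.load_state_dict(eager.state_dict())  # identical weights

Q, K, V = (t.cuda() for t in get_inputs())
O_ref, KV_ref = eager(Q, K, V)
O_new, KV_new = compiled(Q, K, V)
torch.testing.assert_close(O_ref, O_new)
torch.testing.assert_close(KV_ref, KV_new)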
gheyret/EfficientConformer
MultiHeadLinearAttention
false
15438
[ "Apache-2.0" ]
101
b28a0aaa3b182f72abaccbeb12df0402adf96097
https://github.com/gheyret/EfficientConformer/tree/b28a0aaa3b182f72abaccbeb12df0402adf96097
PowerLaw_Compressed_Loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/li/cli7xfzlnx5in3ir5ua6wggbl6sat2aztcifjjqch3ujh34wvta2.py # Topologically Sorted Source Nodes: [target, target_1, abs_1, prediction, prediction_1, abs_2, spec_loss, complex_loss, mul, loss], Original ATen: [aten.add, aten.pow, aten.abs, aten.mse_loss, aten.mul] # Source node to ATen node mapping: # abs_1 => abs_1 # abs_2 => abs_2 # complex_loss => mean_1, pow_4, sub_1 # loss => add_2 # mul => mul # prediction => add # prediction_1 => pow_1 # spec_loss => mean, pow_3, sub # target => add_1 # target_1 => pow_2 # Graph fragment: # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, 1e-16), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_1, 0.3), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%pow_2,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-16), kwargs = {}) # %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.3), kwargs = {}) # %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%pow_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, %abs_2), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_3,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_2, %pow_1), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_4,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_1, 0.113), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mul), kwargs = {}) triton_per_fused_abs_add_mse_loss_mul_pow_0 = async_compile.triton('triton_per_fused_abs_add_mse_loss_mul_pow_0', ''' import triton import 
triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_mse_loss_mul_pow_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_add_mse_loss_mul_pow_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp6 = tl.load(in_ptr1 + (r0), None) tmp1 = 1e-16 tmp2 = tmp0 + tmp1 tmp3 = 0.3 tmp4 = libdevice.pow(tmp2, tmp3) tmp5 = tl_math.abs(tmp4) tmp7 = tmp6 + tmp1 tmp8 = libdevice.pow(tmp7, tmp3) tmp9 = tl_math.abs(tmp8) tmp10 = tmp5 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = tmp4 - tmp8 tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [RBLOCK]) tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tmp20 = 256.0 tmp21 = tmp14 / tmp20 tmp22 = tmp19 / tmp20 tmp23 = 0.113 tmp24 = tmp22 * tmp23 tmp25 = tmp21 + tmp24 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp25, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [target, target_1, abs_1, prediction, prediction_1, abs_2, spec_loss, complex_loss, mul, loss], Original ATen: [aten.add, aten.pow, aten.abs, aten.mse_loss, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_abs_add_mse_loss_mul_pow_0.run(buf2, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', 
dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
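The whole loss above fits in one persistent-reduction kernel: xnumel = 1 and RBLOCK = 256, so a single program block loads all 256 elements of each 4x4x4x4 input, accumulates both squared-error sums (tmp14 and tmp19), divides by 256.0 and mixes in the 0.113 ratio. An eager restatement of the kernel body, with comments mapping back to the tmp names; this is a reading aid, not code from the repo:

import torch

def fused_reference(pred, target, power=0.3, ratio=0.113, eps=1e-16):
    a = (target + eps) ** power                            # tmp4
    b = (pred + eps) ** power                              # tmp8
    spec = ((a.abs() - b.abs()) ** 2).sum() / a.numel()    # tmp14 / 256.0
    cplx = ((a - b) ** 2).sum() / a.numel()                # tmp19 / 256.0
    return spec + ratio * cplx                             # tmp25

loss = fused_reference(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))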
import torch import torch.nn as nn import torch.utils.data class PowerLaw_Compressed_Loss(nn.Module): def __init__(self, power=0.3, complex_loss_ratio=0.113): super(PowerLaw_Compressed_Loss, self).__init__() self.power = power self.complex_loss_ratio = complex_loss_ratio self.criterion = nn.MSELoss() self.epsilon = 1e-16 def forward(self, prediction, target, seq_len=None, spec_phase=None): prediction = prediction + self.epsilon target = target + self.epsilon prediction = torch.pow(prediction, self.power) target = torch.pow(target, self.power) spec_loss = self.criterion(torch.abs(target), torch.abs(prediction)) complex_loss = self.criterion(target, prediction) loss = spec_loss + complex_loss * self.complex_loss_ratio return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
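Written out with p = 0.3, r = 0.113 and eps = 1e-16, the forward above computes loss = MSE(|(t+eps)^p|, |(x+eps)^p|) + r * MSE((t+eps)^p, (x+eps)^p). For real, non-negative inputs (as in get_inputs) the abs() is a no-op, the two terms coincide, and the loss collapses to (1 + r) * MSE(t^p, x^p); the naming suggests the second term was meant for complex spectra. A quick numeric check of that identity:

import torch
import torch.nn.functional as F

crit = PowerLaw_Compressed_Loss()
x, t = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)

mse = F.mse_loss((t + crit.epsilon) ** crit.power,
                 (x + crit.epsilon) ** crit.power)
torch.testing.assert_close(crit(x, t), (1 + crit.complex_loss_ratio) * mse)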
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_mse_loss_mul_pow_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp6 = tl.load(in_ptr1 + r0, None) tmp1 = 1e-16 tmp2 = tmp0 + tmp1 tmp3 = 0.3 tmp4 = libdevice.pow(tmp2, tmp3) tmp5 = tl_math.abs(tmp4) tmp7 = tmp6 + tmp1 tmp8 = libdevice.pow(tmp7, tmp3) tmp9 = tl_math.abs(tmp8) tmp10 = tmp5 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = tmp4 - tmp8 tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [RBLOCK]) tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tmp20 = 256.0 tmp21 = tmp14 / tmp20 tmp22 = tmp19 / tmp20 tmp23 = 0.113 tmp24 = tmp22 * tmp23 tmp25 = tmp21 + tmp24 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp25, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_add_mse_loss_mul_pow_0[grid(1)](buf2, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, class PowerLaw_Compressed_LossNew(nn.Module): def __init__(self, power=0.3, complex_loss_ratio=0.113): super(PowerLaw_Compressed_LossNew, self).__init__() self.power = power self.complex_loss_ratio = complex_loss_ratio self.criterion = nn.MSELoss() self.epsilon = 1e-16 def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
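And the usual parity sketch for this record, assuming both classes above are in scope on a CUDA machine; the compiled path should match eager numerics to within the default assert_close tolerances (the reduction order differs):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
t = torch.rand(4, 4, 4, 4, device='cuda')

ref = PowerLaw_Compressed_Loss()(x, t)
new = PowerLaw_Compressed_LossNew()(x, t)
torch.testing.assert_close(ref, new)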
giuliacassara/VoiceSplit
PowerLaw_Compressed_Loss
false
15,439
[ "Apache-2.0" ]
84
1aa98dce9460db7ec6c5449eb7f92e3902f71a2a
https://github.com/giuliacassara/VoiceSplit/tree/1aa98dce9460db7ec6c5449eb7f92e3902f71a2a
AUXModule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ku/ckuooqx6q27vkk7tqqmkpsydmkdv3enwjhqctsyozm4owufb7yb6.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.adaptive_max_pool2d] # Source node to ATen node mapping: # x => adaptive_max_pool2d # Graph fragment: # %adaptive_max_pool2d : [num_users=1] = call_function[target=torch.ops.aten.adaptive_max_pool2d.default](args = (%primals_1, [1, 1]), kwargs = {}) triton_poi_fused_adaptive_max_pool2d_0 = async_compile.triton('triton_poi_fused_adaptive_max_pool2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_adaptive_max_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_adaptive_max_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(out_ptr0 + (x0), tmp30, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.adaptive_max_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_adaptive_max_pool2d_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 return (buf1, reinterpret_tensor(buf0, (4, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class AUXModule(nn.Module): def __init__(self, in_features, out_features): super().__init__() self.linear = nn.Linear(in_features, out_features) def forward(self, x): x = F.adaptive_max_pool2d(x, output_size=(1, 1)) x = x.view(-1, x.size(1)) x = self.linear(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
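With output_size=(1, 1), adaptive max pooling is simply a global max over the spatial axes, which is what the generated kernel's chain of pairwise maximum() calls computes; a small sketch assuming the class above:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
pooled = F.adaptive_max_pool2d(x, output_size=(1, 1))
assert torch.equal(pooled, x.amax(dim=(2, 3), keepdim=True))

head = AUXModule(in_features=4, out_features=4)
out = head(x)  # (4, 4, 4, 4) -> pool (4, 4, 1, 1) -> view (4, 4) -> linear (4, 4)
assert out.shape == (4, 4)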
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_adaptive_max_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(out_ptr0 + x0, tmp30, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_adaptive_max_pool2d_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha =1, beta=1, out=buf1) del primals_2 del primals_3 return buf1, reinterpret_tensor(buf0, (4, 4), (4, 1), 0) class AUXModuleNew(nn.Module): def __init__(self, in_features, out_features): 
super().__init__() self.linear = nn.Linear(in_features, out_features) def forward(self, input_0): primals_2 = self.linear.weight primals_3 = self.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
gorogoroyasu/mlcomp
AUXModule
false
15,440
[ "Apache-2.0" ]
166
fc6572ca5b226b35df97f13badd4420b30468a3b
https://github.com/gorogoroyasu/mlcomp/tree/fc6572ca5b226b35df97f13badd4420b30468a3b
HuggingfaceClassifier
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/pd/cpdu37l3bj63bjibgjk2ueagf7o3e26iukuvw6axiaa2bjb2e6op.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_2 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex 
% 8 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4), (4, 1)) assert_size_stride(primals_3, (8, ), (1, )) assert_size_stride(primals_4, (4, 8), (8, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 8), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0); del buf0 # reuse buf3 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf3, 512, grid=grid(512), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 8), (8, 1), 0), reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2) del primals_5 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 8), (8, 1), 0), primals_4, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn import torch.onnx.operators def get_activation_fn(activation): """ Get activation function by name Args: activation: activation function name Returns: - activation function """ if activation == 'relu': return F.relu elif activation == 'gelu': return F.gelu else: raise KeyError class HuggingfaceClassifier(nn.Module): """ Classifier implemented in HuggingfaceClassificationHead style. Args: d_model: feature dimensionality labels: number of classes inner_dim: dimensionality in the inner vector space. activation: activation function used in the feed-forward network dropout: dropout rate """ def __init__(self, d_model, labels, inner_dim=None, activation='relu', dropout=0.0): super().__init__() inner_dim = inner_dim or d_model * 2 self._fc1 = nn.Linear(d_model, inner_dim) self._dropout = nn.Dropout(dropout) self._fc2 = nn.Linear(inner_dim, labels) self._activation = get_activation_fn(activation) def forward(self, x): """ Args: x: feature to predict labels :math:`(*, D)`, where D is the feature dimension Returns: - unnormalized logits for each class :math:`(*, L)`, where L is the number of classes """ x = self._dropout(x) x = self._fc1(x) x = self._activation(x) x = self._dropout(x) x = self._fc2(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'labels': 4}]
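Shape walk-through for the head above (a usage sketch; with d_model=4 the default inner_dim is 2 * d_model = 8, matching the (8, 4) and (4, 8) weight shapes asserted in the compiled call()):

import torch

head = HuggingfaceClassifier(d_model=4, labels=4)
feats = torch.rand(4, 4, 4, 4)   # (*, D) with D = 4
logits = head(feats)             # (*, L) with L = 4
assert logits.shape == feats.shape[:-1] + (4,)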
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn.functional as F import torch.nn as nn import torch.onnx.operators assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4), (4, 1)) assert_size_stride(primals_3, (8,), (1,)) assert_size_stride(primals_4, (4, 8), (8, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 8), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8), (128, 32, 8, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(512)](buf1, primals_3, buf3, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 8), ( 8, 1), 0), reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 8), (8, 1), 0), primals_4, buf3 def get_activation_fn(activation): """ Get activation function by name Args: activation: activation function name Returns: - activation function """ if activation == 'relu': return F.relu elif activation == 'gelu': return F.gelu else: raise KeyError class HuggingfaceClassifierNew(nn.Module): """ Classifier implemented in HuggingfaceClassificationHead style. Args: d_model: feature dimensionality labels: number of classes inner_dim: dimensionality in the inner vector space. 
activation: activation function used in the feed-forward network dropout: dropout rate """ def __init__(self, d_model, labels, inner_dim=None, activation='relu', dropout=0.0): super().__init__() inner_dim = inner_dim or d_model * 2 self._fc1 = nn.Linear(d_model, inner_dim) self._dropout = nn.Dropout(dropout) self._fc2 = nn.Linear(inner_dim, labels) self._activation = get_activation_fn(activation) def forward(self, input_0): primals_2 = self._fc1.weight primals_3 = self._fc1.bias primals_4 = self._fc2.weight primals_5 = self._fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
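In eager terms, the fused relu/threshold_backward kernel above materializes both the activation and the boolean mask that autograd later uses to zero gradients where the ReLU clamped; a sketch with illustrative tensors:

import torch

x = torch.rand(64, 8)      # matmul output (buf0 / in_out_ptr0)
bias = torch.rand(8)       # fc1 bias (primals_3)
pre = x + bias             # tmp2: bias broadcast over the last axis (x0 = xindex % 8)
act = pre.clamp_min(0.0)   # tmp4: ReLU, written back in place
mask = act <= 0.0          # tmp6: saved for threshold_backward (buf3)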
godweiyang/ParaGen
HuggingfaceClassifier
false
15,441
[ "Apache-2.0" ]
50
9665d1244ea38a41fc06b4e0a7f6411985e2221f
https://github.com/godweiyang/ParaGen/tree/9665d1244ea38a41fc06b4e0a7f6411985e2221f
SimpleTextClassifier
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/kg/ckgzihqkefbwia453q3p3zvcladudrw3ev36dxtifw6sceehflpj.py # Topologically Sorted Source Nodes: [hidden1], Original ATen: [aten.clamp, aten.ge] # Source node to ATen node mapping: # hidden1 => clamp_min # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%view_1, 0), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%view_1, 0), kwargs = {}) triton_poi_fused_clamp_ge_0 = async_compile.triton('triton_poi_fused_clamp_ge_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_ge_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_ge_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = 
tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp2 >= tmp3 tl.store(out_ptr0 + (x2), tmp4, None) tl.store(out_ptr1 + (x2), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bg/cbg32drchyezvbfwshguvyopixmzwi2llws7xkhvpdruis76tr2t.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {}) triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/oo/coo5rivaroinv27r7to5gs4jb7ce7itar6epfsastoa2ig6tj65k.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => exp, log, sub_1, sum_1 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = 
call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + (x3), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) # 
Topologically Sorted Source Nodes: [hidden1], Original ATen: [aten.clamp, aten.ge] stream0 = get_raw_stream(0) triton_poi_fused_clamp_ge_0.run(buf0, primals_2, buf1, buf5, 8192, grid=grid(8192), stream=stream0) del buf0 del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0) del buf3 return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), buf4, primals_4, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class SimpleTextClassifier(nn.Module): """Text Classifier with 1 hidden layer """ def __init__(self, num_labels, vocab_size): super(SimpleTextClassifier, self).__init__() self.linear1 = nn.Linear(vocab_size, 128) self.linear2 = nn.Linear(128, num_labels) def forward(self, feature_vec): hidden1 = self.linear1(feature_vec).clamp(min=0) output = self.linear2(hidden1) return F.log_softmax(output, dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_labels': 4, 'vocab_size': 4}]
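Usage sketch for the classifier above; note that log_softmax runs over dim=1, which for the (4, 4, 4, 4) test input is the second axis (hence the stride-16 loads in the reduction kernels below):

import torch

clf = SimpleTextClassifier(num_labels=4, vocab_size=4)
scores = clf(torch.rand(4, 4, 4, 4))
assert scores.shape == (4, 4, 4, 4)
# Exponentiating and summing over dim=1 recovers a normalized distribution.
assert torch.allclose(scores.exp().sum(dim=1), torch.ones(4, 4, 4))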
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_ge_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp2 >= tmp3 tl.store(out_ptr0 + x2, tmp4, None) tl.store(out_ptr1 + x2, tmp5, None) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x3, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) get_raw_stream(0) triton_poi_fused_clamp_ge_0[grid(8192)](buf0, primals_2, buf1, buf5, 8192, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(256)](buf2, buf3, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__log_softmax_2[grid(256)](buf3, buf4, 256, XBLOCK= 256, num_warps=4, num_stages=1) del buf3 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 128), (128, 1), 0 ), buf4, primals_4, buf5 class SimpleTextClassifierNew(nn.Module): """Text Classifier with 1 hidden layer """ def __init__(self, num_labels, vocab_size): super(SimpleTextClassifierNew, self).__init__() self.linear1 = nn.Linear(vocab_size, 128) self.linear2 = nn.Linear(128, num_labels) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
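The two _log_softmax kernels implement the usual numerically stable split: the first subtracts the per-group max, the second applies the log-sum-exp correction. The same decomposition in plain PyTorch, as a sketch:

import torch

def stable_log_softmax(x, dim):
    shifted = x - x.amax(dim=dim, keepdim=True)           # kernel 1: amax + sub
    lse = shifted.exp().sum(dim=dim, keepdim=True).log()  # kernel 2: exp/sum/log
    return shifted - lse

x = torch.randn(4, 4, 4, 4)
assert torch.allclose(stable_log_softmax(x, 1), torch.log_softmax(x, dim=1))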
goodmike31/pytorch_active_learning
SimpleTextClassifier
false
15,442
[ "MIT" ]
629
1224efad1f8022efa933cd36e30f78ed06eaaea7
https://github.com/goodmike31/pytorch_active_learning/tree/1224efad1f8022efa933cd36e30f78ed06eaaea7
LocalMultiHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/5t/c5txhhxqwgxdtwmgseoxqhkhd47b4q5u6chxi7shnrpkkyfwlubq.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < 
xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7d/c7ddfgrc22a7252ttzyyslijvxoc7y7jzhuvjamysv4m2g3zo2a7.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x3 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x1 + (4*x0) + (16*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x4), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/js/cjsmlqn67etksvbhpa5ea3a3bvf5n2q2qz46jovzyawqxzvfnaes.py # Topologically Sorted Source Nodes: [att_w], Original ATen: [aten._softmax] # Source node to ATen node mapping: # att_w => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = 
call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mg/cmgiz33dt6tfbqkmgx6alqvz6wjg7vdugxhswqlmo26mlpfls7or.py # Topologically Sorted Source Nodes: [att_w], Original ATen: [aten._softmax] # Source node to ATen node mapping: # att_w => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 
'*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ox/coxm2defb4vel2jkahsdwcqypbhilkr6mdd5l4bowkc4r743axgv.py # Topologically Sorted Source Nodes: [O_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # O_1 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, primals_3, buf3, 64, 4, grid=grid(64, 4), stream=stream0) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 4, 1, 4), (64, 16, 4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf1, primals_5, buf4, 256, grid=grid(256), stream=stream0) del buf1 del primals_5 buf5 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (64, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [att_w], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 1024, grid=grid(1024), stream=stream0) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [att_w], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf6, buf7, 1024, grid=grid(1024), stream=stream0) del buf6 buf8 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 
1), torch.float32) # Topologically Sorted Source Nodes: [O], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf2, primals_8, buf8, 64, 4, grid=grid(64, 4), stream=stream0) del primals_8 buf9 = reinterpret_tensor(buf2, (64, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [O], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf7, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (64, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [O_1], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf9, buf10, 64, 4, grid=grid(64, 4), stream=stream0) buf11 = reinterpret_tensor(buf9, (64, 4), (4, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [O_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11) del primals_11 return (reinterpret_tensor(buf11, (4, 16, 4), (64, 4, 1), 0), buf7, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (64, 4), (4, 1), 0), primals_10, reinterpret_tensor(buf8, (64, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (64, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (64, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class Linear(nn.Linear): def __init__(self, in_features, out_features, bias=True): super(Linear, self).__init__(in_features=in_features, out_features= out_features, bias=bias) self.noise = None self.vn_std = None def init_vn(self, vn_std): self.vn_std = vn_std def sample_synaptic_noise(self, distributed): self.noise = torch.normal(mean=0.0, std=1.0, size=self.weight.size( ), device=self.weight.device, dtype=self.weight.dtype) if distributed: torch.distributed.broadcast(self.noise, 0) def forward(self, input): weight = self.weight if self.noise is not None and self.training: weight = weight + self.vn_std * self.noise return F.linear(input, weight, self.bias) class MultiHeadAttention(nn.Module): """Multi-Head Attention Layer Args: dim_model: model feature dimension num_heads: number of attention heads References: Attention Is All You Need, Vaswani et al. https://arxiv.org/abs/1706.03762 """ def __init__(self, dim_model, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads = num_heads self.dim_model = dim_model self.dim_head = dim_model // num_heads self.query_layer = Linear(self.dim_model, self.dim_model) self.key_layer = Linear(self.dim_model, self.dim_model) self.value_layer = Linear(self.dim_model, self.dim_model) self.output_layer = Linear(self.dim_model, self.dim_model) def forward(self, Q, K, V, mask=None): """Scaled Dot-Product Multi-Head Attention Args: Q: Query of shape (B, T, D) K: Key of shape (B, T, D) V: Value of shape (B, T, D) mask: Optional position mask of shape (1 or B, 1 or H, 1 or T, 1 or T) Return: O: Attention output of shape (B, T, D) att_w: Attention weights of shape (B, H, T, T) """ batch_size = Q.size(0) Q = self.query_layer(Q) K = self.key_layer(K) V = self.value_layer(V) Q = Q.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose( 1, 2) K = K.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose( 1, 2) V = V.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose( 1, 2) att_scores = Q.matmul(K.transpose(2, 3)) / K.shape[-1] ** 0.5 if mask is not None: att_scores += mask * -1000000000.0 att_w = att_scores.softmax(dim=-1) O = att_w.matmul(V) O = O.transpose(1, 2).reshape(batch_size, -1, self.dim_model) O = self.output_layer(O) return O, att_w.detach() def pad(self, Q, K, V, mask, chunk_size): overflow_Q = Q.size(1) % chunk_size overflow_KV = K.size(1) % chunk_size padding_Q = chunk_size - overflow_Q if overflow_Q else 0 padding_KV = chunk_size - overflow_KV if overflow_KV else 0 batch_size, seq_len_KV, _ = K.size() Q = F.pad(Q, (0, 0, 0, padding_Q), value=0) K = F.pad(K, (0, 0, 0, padding_KV), value=0) V = F.pad(V, (0, 0, 0, padding_KV), value=0) if mask is not None: if mask.size(2) == 1: mask = F.pad(mask, pad=(0, padding_KV), value=1) else: mask = F.pad(mask, pad=(0, padding_Q, 0, padding_KV), value=1) elif padding_KV: mask = F.pad(Q.new_zeros(batch_size, 1, 1, seq_len_KV), pad=(0, padding_KV), value=1) return Q, K, V, mask, padding_Q class LocalMultiHeadAttention(MultiHeadAttention): """Local Multi-Head Attention Layer Local multi-head attention restricts the attended positions to a local neighborhood around the query position. This is achieved by segmenting the hidden sequence into non-overlapping blocks of size K and performing scaled dot-product attention in parallel for each of these blocks.
Args: dim_model: model feature dimension num_heads: number of attention heads kernel_size: attention kernel size / window References: Image Transformer, Parmar et al. https://arxiv.org/abs/1802.05751 """ def __init__(self, dim_model, num_heads, kernel_size): super(LocalMultiHeadAttention, self).__init__(dim_model, num_heads) self.kernel_size = kernel_size def forward(self, Q, K, V, mask=None): batch_size = Q.size(0) Q = self.query_layer(Q) K = self.key_layer(K) V = self.value_layer(V) Q, K, V, mask, padding = self.pad(Q, K, V, mask, chunk_size=self. kernel_size) Q = Q.reshape(batch_size, -1, self.kernel_size, self.num_heads, self.dim_head).transpose(2, 3) K = K.reshape(batch_size, -1, self.kernel_size, self.num_heads, self.dim_head).transpose(2, 3) V = V.reshape(batch_size, -1, self.kernel_size, self.num_heads, self.dim_head).transpose(2, 3) att_scores = Q.matmul(K.transpose(3, 4)) / K.shape[-1] ** 0.5 if mask is not None: masks = [] for m in range(mask.size(-1) // self.kernel_size): masks.append(mask[:, :, m * self.kernel_size:(m + 1) * self .kernel_size, m * self.kernel_size:(m + 1) * self. kernel_size]) mask = torch.stack(masks, dim=1) att_scores = att_scores.float() - mask.float() * 1000000000.0 att_w = att_scores.softmax(dim=-1) O = att_w.matmul(V) O = O.transpose(2, 3).reshape(batch_size, -1, self.dim_model) O = O[:, :O.size(1) - padding] O = self.output_layer(O) return O, att_w.detach() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_model': 4, 'num_heads': 4, 'kernel_size': 4}]
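# Illustrative usage sketch (added by the editor, not part of the original
# module): runs the eager LocalMultiHeadAttention above on CPU with (B, T, D)
# inputs whose length is a multiple of kernel_size, so pad() is a no-op.
# All tensor names below are hypothetical.
if __name__ == '__main__':
    torch.manual_seed(0)
    mha = LocalMultiHeadAttention(dim_model=4, num_heads=4, kernel_size=4)
    q = torch.rand(4, 16, 4)  # (B=4, T=16, D=4)
    k = torch.rand(4, 16, 4)
    v = torch.rand(4, 16, 4)
    out, att_w = mha(q, k, v)
    print(out.shape)    # torch.Size([4, 16, 4]) -- same (B, T, D) as the query
    print(att_w.shape)  # torch.Size([4, 4, 4, 4, 4]) -- (B, T//K, H, K, K) block-local weights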
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x1 + 4 * x0 + 16 * x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](buf0, primals_3, buf3, 64, 4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 4, 1, 4), (64, 16, 4, 4, 1), 0) del buf0 triton_poi_fused_clone_1[grid(256)](buf1, primals_5, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_5 buf5 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (64, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(1024)](buf5, buf6, 1024, XBLOCK= 256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0 ) del buf5 triton_poi_fused__softmax_3[grid(1024)](buf6, buf7, 1024, XBLOCK= 128, num_warps=4, num_stages=1) del buf6 buf8 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) triton_poi_fused_clone_0[grid(64, 4)](buf2, primals_8, buf8, 64, 4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf9 = reinterpret_tensor(buf2, (64, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf7, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (64, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(64, 4)](buf9, buf10, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf9, (64, 4), (4, 1), 0) del buf9 extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (64, 4), (4, 
1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11) del primals_11 return reinterpret_tensor(buf11, (4, 16, 4), (64, 4, 1), 0 ), buf7, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf10, (64, 4), (4, 1), 0 ), primals_10, reinterpret_tensor(buf8, (64, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (64, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (64, 4, 1), (4, 1, 4), 0) class Linear(nn.Linear): def __init__(self, in_features, out_features, bias=True): super(Linear, self).__init__(in_features=in_features, out_features= out_features, bias=bias) self.noise = None self.vn_std = None def init_vn(self, vn_std): self.vn_std = vn_std def sample_synaptic_noise(self, distributed): self.noise = torch.normal(mean=0.0, std=1.0, size=self.weight.size( ), device=self.weight.device, dtype=self.weight.dtype) if distributed: torch.distributed.broadcast(self.noise, 0) def forward(self, input): weight = self.weight if self.noise is not None and self.training: weight = weight + self.vn_std * self.noise return F.linear(input, weight, self.bias) class MultiHeadAttention(nn.Module): """Multi-Head Attention Layer Args: dim_model: model feature dimension num_heads: number of attention heads References: Attention Is All You Need, Vaswani et al. https://arxiv.org/abs/1706.03762 """ def __init__(self, dim_model, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads = num_heads self.dim_model = dim_model self.dim_head = dim_model // num_heads self.query_layer = Linear(self.dim_model, self.dim_model) self.key_layer = Linear(self.dim_model, self.dim_model) self.value_layer = Linear(self.dim_model, self.dim_model) self.output_layer = Linear(self.dim_model, self.dim_model) def forward(self, Q, K, V, mask=None): """Scaled Dot-Product Multi-Head Attention Args: Q: Query of shape (B, T, D) K: Key of shape (B, T, D) V: Value of shape (B, T, D) mask: Optional position mask of shape (1 or B, 1 or H, 1 or T, 1 or T) Return: O: Attention output of shape (B, T, D) att_w: Attention weights of shape (B, H, T, T) """ batch_size = Q.size(0) Q = self.query_layer(Q) K = self.key_layer(K) V = self.value_layer(V) Q = Q.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose( 1, 2) K = K.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose( 1, 2) V = V.reshape(batch_size, -1, self.num_heads, self.dim_head).transpose( 1, 2) att_scores = Q.matmul(K.transpose(2, 3)) / K.shape[-1] ** 0.5 if mask is not None: att_scores += mask * -1000000000.0 att_w = att_scores.softmax(dim=-1) O = att_w.matmul(V) O = O.transpose(1, 2).reshape(batch_size, -1, self.dim_model) O = self.output_layer(O) return O, att_w.detach() def pad(self, Q, K, V, mask, chunk_size): overflow_Q = Q.size(1) % chunk_size overflow_KV = K.size(1) % chunk_size padding_Q = chunk_size - overflow_Q if overflow_Q else 0 padding_KV = chunk_size - overflow_KV if overflow_KV else 0 batch_size, seq_len_KV, _ = K.size() Q = F.pad(Q, (0, 0, 0, padding_Q), value=0) K = F.pad(K, (0, 0, 0, padding_KV), value=0) V = F.pad(V, (0, 0, 0, padding_KV), value=0) if mask is not None: if mask.size(2) == 1: mask = F.pad(mask, pad=(0, padding_KV), value=1) else: mask = F.pad(mask, pad=(0, padding_Q, 0, padding_KV), value=1) elif padding_KV: mask = F.pad(Q.new_zeros(batch_size, 1, 1, seq_len_KV), pad=(0, padding_KV), value=1) return Q, K, V, mask, padding_Q class
LocalMultiHeadAttentionNew(MultiHeadAttention): """Local Multi-Head Attention Layer Local multi-head attention restricts the attended positions to a local neighborhood around the query position. This is achieved by segmenting the hidden sequence into non-overlapping blocks of size K and performing scaled dot-product attention in parallel for each of these blocks. Args: dim_model: model feature dimension num_heads: number of attention heads kernel_size: attention kernel size / window References: Image Transformer, Parmar et al. https://arxiv.org/abs/1802.05751 """ def __init__(self, dim_model, num_heads, kernel_size): super(LocalMultiHeadAttentionNew, self).__init__(dim_model, num_heads) self.kernel_size = kernel_size def forward(self, input_0, input_1, input_2): primals_2 = self.query_layer.weight primals_3 = self.query_layer.bias primals_4 = self.key_layer.weight primals_5 = self.key_layer.bias primals_7 = self.value_layer.weight primals_8 = self.value_layer.bias primals_10 = self.output_layer.weight primals_11 = self.output_layer.bias primals_1 = input_0 primals_6 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0], output[1]
gheyret/EfficientConformer
LocalMultiHeadAttention
false
15,443
[ "Apache-2.0" ]
101
b28a0aaa3b182f72abaccbeb12df0402adf96097
https://github.com/gheyret/EfficientConformer/tree/b28a0aaa3b182f72abaccbeb12df0402adf96097
NormedMSE
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/f4/cf4wzaoib6zlvrqqjktwzisb46krxyk3mdts3jt54cj43kfq3q7d.py # Topologically Sorted Source Nodes: [inp, tgt, mse_loss], Original ATen: [aten.div, aten.mse_loss] # Source node to ATen node mapping: # inp => div # mse_loss => mean, pow_5, sub # tgt => div_1 # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg1_1, %expand_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %div_1), kwargs = {}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_5,), kwargs = {}) triton_red_fused_div_mse_loss_0 = async_compile.triton('triton_red_fused_div_mse_loss_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[1, 256], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_div_mse_loss_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_div_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 1 rnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp34 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex r1 = (rindex // 4) tmp0 = tl.load(in_ptr0 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr0 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp9 = tl.load(in_ptr0 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr1 + (r2), rmask, eviction_policy='evict_first', other=0.0) tmp17 = tl.load(in_ptr1 + (4*r1), rmask, eviction_policy='evict_last', other=0.0) tmp19 = tl.load(in_ptr1 + (1 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp22 = tl.load(in_ptr1 + (2 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp25 = tl.load(in_ptr1 + (3 + (4*r1)), rmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp18 = tmp17 * tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = libdevice.sqrt(tmp27) tmp29 = triton_helpers.maximum(tmp28, tmp13) tmp30 = tmp16 / tmp29 tmp31 = tmp15 - tmp30 tmp32 = tmp31 * tmp31 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = _tmp34 + tmp33 _tmp34 = tl.where(rmask, tmp35, _tmp34) tmp34 = tl.sum(_tmp34, 1)[:, None] tmp36 = 256.0 tmp37 = tmp34 / tmp36 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp37, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [inp, tgt, mse_loss], Original ATen: [aten.div, aten.mse_loss] stream0 = get_raw_stream(0) triton_red_fused_div_mse_loss_0.run(buf2, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class NormedMSE(nn.MSELoss): def forward(self, inp, tgt, *args, **kwargs): """ Args: inp: (*, C) tgt: (*, C) Will normalize the input before the loss """ inp = nn.functional.normalize(inp, dim=-1, p=2) tgt = nn.functional.normalize(tgt, dim=-1, p=2) return super().forward(inp, tgt, *args, **kwargs) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
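# Illustrative check (added by the editor, not part of the original module):
# NormedMSE is plain MSE applied after projecting both tensors onto the unit
# L2 sphere along the last dimension, so the two formulations below agree.
if __name__ == '__main__':
    torch.manual_seed(0)
    inp, tgt = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    loss = NormedMSE()(inp, tgt)
    ref = nn.functional.mse_loss(nn.functional.normalize(inp, dim=-1, p=2),
                                 nn.functional.normalize(tgt, dim=-1, p=2))
    assert torch.allclose(loss, ref)
    print(loss.item())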
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_red_fused_div_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 256 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp34 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex r1 = rindex // 4 tmp0 = tl.load(in_ptr0 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tl.load(in_ptr0 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp9 = tl.load(in_ptr0 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp16 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_first', other=0.0) tmp17 = tl.load(in_ptr1 + 4 * r1, rmask, eviction_policy= 'evict_last', other=0.0) tmp19 = tl.load(in_ptr1 + (1 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp22 = tl.load(in_ptr1 + (2 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp25 = tl.load(in_ptr1 + (3 + 4 * r1), rmask, eviction_policy= 'evict_last', other=0.0) tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp18 = tmp17 * tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = libdevice.sqrt(tmp27) tmp29 = triton_helpers.maximum(tmp28, tmp13) tmp30 = tmp16 / tmp29 tmp31 = tmp15 - tmp30 tmp32 = tmp31 * tmp31 tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK]) tmp35 = _tmp34 + tmp33 _tmp34 = tl.where(rmask, tmp35, _tmp34) tmp34 = tl.sum(_tmp34, 1)[:, None] tmp36 = 256.0 tmp37 = tmp34 / tmp36 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp37, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_red_fused_div_mse_loss_0[grid(1)](buf2, arg0_1, arg1_1, 1, 256, XBLOCK=1, RBLOCK=256, num_warps=8, num_stages=1) del arg0_1 del arg1_1 return buf2, class NormedMSENew(nn.MSELoss): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
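# Hedged parity sketch (added by the editor): compares the Inductor-generated
# NormedMSENew against the eager formulation it was compiled from. This assumes
# a CUDA device is available, since triton_red_fused_div_mse_loss_0 only runs
# on GPU and call() asserts the exact (4, 4, 4, 4) contiguous input shape.
if __name__ == '__main__':
    torch.manual_seed(0)
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    fused = NormedMSENew()(a, b)
    eager = nn.functional.mse_loss(nn.functional.normalize(a, dim=-1),
                                   nn.functional.normalize(b, dim=-1))
    assert torch.allclose(fused, eager, atol=1e-6)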
gongda0e/AVT
NormedMSE
false
15,444
[ "Apache-2.0" ]
102
d6a7032b86416e852c76cc04a20ccabe34f111dc
https://github.com/gongda0e/AVT/tree/d6a7032b86416e852c76cc04a20ccabe34f111dc
output
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/z4/cz4knm5gsry5nxmyw5m42l5ulhzyzd6olpaz4oc7xc27t3albqx7.py # Topologically Sorted Source Nodes: [conv2d, score], Original ATen: [aten.convolution, aten.sigmoid] # Source node to ATen node mapping: # conv2d => convolution # score => sigmoid # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_sigmoid_0 = async_compile.triton('triton_poi_fused_convolution_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * 
XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6z/c6zrkuhgkfkyvxhei4pwwxvwvogxml5geskw4y2qiy5xycvg4g3q.py # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/5p/c5p5qppzecxcvl2fokkcfo32s4cs6s4z3s2aeii7lx2jg75iaktc.py # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # Graph fragment: # %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + (x0), tmp3, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ac/cac4mw3iao4k4bswi2qzczxq7bmd4wgizlt7atn5szsc3mfyqjcs.py # Topologically Sorted Source Nodes: [geo], Original ATen: [aten.cat] # Source node to ATen node mapping: # geo => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul, %mul_1], 1), kwargs = {}) triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 81920 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 4096) % 5 x0 = xindex % 4096 x2 = (xindex // 20480) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (16384*x2)), tmp4, other=0.0) tmp6 = tl.sigmoid(tmp5) tmp7 = 512.0 tmp8 = tmp6 * tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tmp12 = tl.full([1], 5, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr1 + (x0 + (4096*x2)), tmp11, eviction_policy='evict_last', other=0.0) tmp15 = tl.sigmoid(tmp14) tmp16 = 0.5 tmp17 = tmp15 - tmp16 tmp18 = 3.141592653589793 tmp19 = tmp17 * tmp18 tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp11, tmp19, tmp20) tmp22 = tl.where(tmp4, tmp10, tmp21) tl.store(out_ptr0 + (x3), tmp22, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (1, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, 32, 64, 64), (131072, 4096, 64, 1)) assert_size_stride(primals_4, (4, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (1, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_7, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, score], Original ATen: [aten.convolution, aten.sigmoid] stream0 = get_raw_stream(0) triton_poi_fused_convolution_sigmoid_0.run(buf1, primals_2, 16384, grid=grid(16384), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 64, 64), (16384, 4096, 64, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf3, primals_5, 65536, grid=grid(65536), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(primals_3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf5, primals_7, 16384, grid=grid(16384), stream=stream0) del primals_7 buf6 = empty_strided_cuda((4, 5, 64, 64), (20480, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [geo], Original ATen: [aten.cat] triton_poi_fused_cat_3.run(buf3, buf5, buf6, 81920, grid=grid(81920), stream=stream0) return (buf1, buf6, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf5, ) def 
benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 32, 64, 64), (131072, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn as nn class output(nn.Module): def __init__(self, scope=512): super(output, self).__init__() self.conv1 = nn.Conv2d(32, 1, 1) self.sigmoid1 = nn.Sigmoid() self.conv2 = nn.Conv2d(32, 4, 1) self.sigmoid2 = nn.Sigmoid() self.conv3 = nn.Conv2d(32, 1, 1) self.sigmoid3 = nn.Sigmoid() self.scope = scope for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias, 0) def forward(self, x): score = self.sigmoid1(self.conv1(x)) loc = self.sigmoid2(self.conv2(x)) * self.scope angle = (self.sigmoid3(self.conv3(x)) - 0.5) * math.pi geo = torch.cat((loc, angle), 1) return score, geo def get_inputs(): return [torch.rand([4, 32, 64, 64])] def get_init_inputs(): return [[], {}]
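# Illustrative usage sketch (added by the editor, not part of the original
# module): runs the eager EAST-style output head above on the get_inputs()
# shape to show the two branches it produces.
if __name__ == '__main__':
    torch.manual_seed(0)
    head = output()
    feat = torch.rand(4, 32, 64, 64)  # (N, C=32, H, W) backbone feature map
    score, geo = head(feat)
    print(score.shape)  # torch.Size([4, 1, 64, 64]); sigmoid scores in (0, 1)
    print(geo.shape)    # torch.Size([4, 5, 64, 64]); 4 location channels scaled by scope, 1 angle channel in (-pi/2, pi/2)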
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, None) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 4 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, None) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 5 x0 = xindex % 4096 x2 = xindex // 20480 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 16384 * x2), tmp4, other=0.0) tmp6 = tl.sigmoid(tmp5) tmp7 = 512.0 tmp8 = tmp6 * tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tl.full([1], 5, tl.int64) tmp14 = tl.load(in_ptr1 + (x0 + 4096 * x2), tmp11, eviction_policy= 'evict_last', other=0.0) tmp15 = tl.sigmoid(tmp14) tmp16 = 0.5 tmp17 = tmp15 - tmp16 tmp18 = 3.141592653589793 tmp19 = tmp17 * tmp18 tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp11, tmp19, tmp20) tmp22 = tl.where(tmp4, tmp10, tmp21) tl.store(out_ptr0 + x3, tmp22, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (1, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 32, 64, 64), (131072, 4096, 64, 1)) assert_size_stride(primals_4, (4, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_sigmoid_0[grid(16384)](buf1, primals_2, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = 
extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 64, 64), (16384, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(65536)](buf3, primals_5, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(primals_3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(16384)](buf5, primals_7, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((4, 5, 64, 64), (20480, 4096, 64, 1), torch.float32) triton_poi_fused_cat_3[grid(81920)](buf3, buf5, buf6, 81920, XBLOCK =512, num_warps=8, num_stages=1) return (buf1, buf6, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf5) class outputNew(nn.Module): def __init__(self, scope=512): super(outputNew, self).__init__() self.conv1 = nn.Conv2d(32, 1, 1) self.sigmoid1 = nn.Sigmoid() self.conv2 = nn.Conv2d(32, 4, 1) self.sigmoid2 = nn.Sigmoid() self.conv3 = nn.Conv2d(32, 1, 1) self.sigmoid3 = nn.Sigmoid() self.scope = scope for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias, 0) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
glc12125/EAST
output
false
15,445
[ "MIT" ]
366
cec7ae98f9c21a475b935f74f4c3969f3a989bd4
https://github.com/glc12125/EAST/tree/cec7ae98f9c21a475b935f74f4c3969f3a989bd4
VirtualBatchNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/23/c23jg33pa2prp4iwyevdetkedocjv56blfdlx3hu2waow3n2qbb3.py # Topologically Sorted Source Nodes: [mean, var], Original ATen: [aten.mean, aten.var] # Source node to ATen node mapping: # mean => mean # var => var # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [0], True), kwargs = {}) # %var : [num_users=2] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [0]), kwargs = {correction: 1, keepdim: True}) triton_poi_fused_mean_var_0 = async_compile.triton('triton_poi_fused_mean_var_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_var_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_var_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), 
xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = 3.0 tmp21 = tmp19 / tmp20 tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp21, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/w2/cw2sqrlevlzzmj3khrkrchps3gaeoh3ly45dxijk5luzkvnebeie.py # Topologically Sorted Source Nodes: [sub, add, sqrt, truediv, mul, y], Original ATen: [aten.sub, aten.add, aten.sqrt, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # mul => mul # sqrt => sqrt # sub => sub # truediv => div # y => add_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %view), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %view_1), kwargs = {}) triton_poi_fused_add_div_mul_sqrt_sub_1 = async_compile.triton('triton_poi_fused_add_div_mul_sqrt_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sqrt_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = 
tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1e-05 tmp5 = tmp3 + tmp4 tmp6 = libdevice.sqrt(tmp5) tmp7 = tmp2 / tmp6 tmp9 = tmp7 * tmp8 tmp11 = tmp9 + tmp10 tl.store(out_ptr0 + (x3), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, var], Original ATen: [aten.mean, aten.var] stream0 = get_raw_stream(0) triton_poi_fused_mean_var_0.run(primals_1, buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, add, sqrt, truediv, mul, y], Original ATen: [aten.sub, aten.add, aten.sqrt, aten.div, aten.mul] triton_poi_fused_add_div_mul_sqrt_sub_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 256, grid=grid(256), stream=stream0) del primals_2 del primals_3 return (buf2, buf1, buf0, primals_1, buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class VirtualBatchNorm(nn.Module): """ Applies Virtual Batch Normalization over a 4D input (a mini-batch of 2D inputs with additional channel dimension) as described in paper `Improved Techniques for Training GANs`: https://arxiv.org/abs/1606.03498 .. math:: y = \\frac{x - \\mathrm{E}[x_\\text{ref}]}{ \\sqrt{\\mathrm{Var}[x_\\text{ref}] + \\epsilon}} * \\gamma + \\beta VirtualBatchNorm requires two forward passes. First one is to calculate mean and variance over a reference batch and second is to calculate the actual output. Args: num_features: :math:`C` from an expected input of size :math:`(N, C, H, W)` eps: a value added to the denominator for numerical stability. Default: 1e-5 """ def __init__(self, num_features, eps=1e-05): super(VirtualBatchNorm, self).__init__() self.num_features = num_features self.eps = eps self.mean = None self.var = None self.weight = nn.parameter.Parameter(torch.Tensor(num_features)) self.bias = nn.parameter.Parameter(torch.Tensor(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def normalize(self, x): y = (x - self.mean) / torch.sqrt(self.var + self.eps ) * self.weight.view(1, self.num_features, 1, 1) + self.bias.view( 1, self.num_features, 1, 1) return y def forward(self, x): """""" if self.mean is None and self.var is None: self.mean = torch.mean(x, dim=0, keepdim=True) self.var = torch.var(x, dim=0, keepdim=True) out = self.normalize(x) else: out = self.normalize(x) self.mean = None self.var = None return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_features': 4}]
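A note on the statistics above: torch.var defaults to the unbiased estimate (correction=1), which is why the fused kernel divides the summed squared deviations by 3.0 (= N - 1 for the N = 4 reference batch). The following sanity check is illustrative only and assumes the VirtualBatchNorm class defined above is in scope; all other names are ours:

import torch

vbn = VirtualBatchNorm(num_features=4)
x = torch.rand(4, 4, 4, 4)
out = vbn(x)  # first pass: stores reference statistics from x, then normalizes x itself

mean = x.mean(dim=0, keepdim=True)
var = x.var(dim=0, keepdim=True)  # unbiased, matching the kernel's division by 3.0
expected = (x - mean) / torch.sqrt(var + 1e-05) * vbn.weight.view(1, 4, 1, 1) \
    + vbn.bias.view(1, 4, 1, 1)
assert torch.allclose(out, expected)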
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_var_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = 3.0 tmp21 = tmp19 / tmp20 tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp21, xmask) @triton.jit def triton_poi_fused_add_div_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1e-05 tmp5 = tmp3 + tmp4 tmp6 = libdevice.sqrt(tmp5) tmp7 = tmp2 / tmp6 tmp9 = tmp7 * tmp8 tmp11 = tmp9 + tmp10 tl.store(out_ptr0 + x3, tmp11, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_var_0[grid(64)](primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mul_sqrt_sub_1[grid(256)](primals_1, buf0, buf1, primals_2, primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf2, buf1, buf0, primals_1, buf0, buf1 class VirtualBatchNormNew(nn.Module): """ Applies Virtual Batch Normalization over a 4D input (a mini-batch of 2D inputs with additional channel dimension) as described in paper `Improved Techniques for Training GANs`: https://arxiv.org/abs/1606.03498 .. math:: y = \\frac{x - \\mathrm{E}[x_\\text{ref}]}{ \\sqrt{\\mathrm{Var}[x_\\text{ref}] + \\epsilon}} * \\gamma + \\beta VirtualBatchNorm requires two forward passes. First one is to calculate mean and variance over a reference batch and second is to calculate the actual output. Args: num_features: :math:`C` from an expected input of size :math:`(N, C, H, W)` eps: a value added to the denominator for numerical stability. 
Default: 1e-5 """ def __init__(self, num_features, eps=1e-05): super(VirtualBatchNormNew, self).__init__() self.num_features = num_features self.eps = eps self.mean = None self.var = None self.weight = nn.parameter.Parameter(torch.Tensor(num_features)) self.bias = nn.parameter.Parameter(torch.Tensor(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def normalize(self, x): y = (x - self.mean) / torch.sqrt(self.var + self.eps ) * self.weight.view(1, self.num_features, 1, 1) + self.bias.view( 1, self.num_features, 1, 1) return y def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
goktug97/estorch
VirtualBatchNorm
false
15446
[ "MIT" ]
53
aa7318b0662faadece1ac9eb241b895d028d613d
https://github.com/goktug97/estorch/tree/aa7318b0662faadece1ac9eb241b895d028d613d
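As a closing note on this entry, a hedged sketch of how the compiled wrapper could be compared against the eager module. It requires a CUDA device; VirtualBatchNorm and VirtualBatchNormNew are the classes from the entry above, everything else is illustrative:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
eager = VirtualBatchNorm(num_features=4).cuda()
compiled = VirtualBatchNormNew(num_features=4).cuda()

with torch.no_grad():
    ref = eager(x)     # first pass: reference statistics come from x itself
    out = compiled(x)  # runs call(), i.e. the two Triton kernels above
print(torch.allclose(ref, out, atol=1e-5))  # expected: True up to kernel rounding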
SimmatModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/mf/cmfacdeh6vjcea6klvpa2izxaljxxu7laxdtuzrkb3pxxcikgp4h.py # Topologically Sorted Source Nodes: [a_denom, b_denom, mul, sim_1], Original ATen: [aten.add, aten.mul, aten.div] # Source node to ATen node mapping: # a_denom => add # b_denom => add_1 # mul => mul # sim_1 => div # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, 1e-09), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_1, 1e-09), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %add_1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm, %mul), kwargs = {}) triton_poi_fused_add_div_mul_0 = async_compile.triton('triton_poi_fused_add_div_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = (xindex // 4) x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (4*x4), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x4)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x4)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x4)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-09 tmp14 = tmp12 + tmp13 tmp16 = tmp15 * tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp26 + tmp13 tmp28 = tmp14 * tmp27 tmp29 = tmp0 / tmp28 tl.store(in_out_ptr0 + (x3), tmp29, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/oe/coe2zmts3vusxk7vdbhxxsdob7b7ylyunwhmmnrpkk4gsrvsycki.py # Topologically Sorted Source Nodes: [a_denom_1, b_denom_1, mul_1, sim_5], Original ATen: [aten.add, aten.mul, aten.div] # Source node to ATen node mapping: # a_denom_1 => add_2 # b_denom_1 => add_3 # mul_1 => mul_1 # sim_5 => div_1 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_4, 1e-09), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_5, 1e-09), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, %add_3), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm_1, %mul_1), kwargs = {}) triton_poi_fused_add_div_mul_1 = async_compile.triton('triton_poi_fused_add_div_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': 
True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = (xindex // 4) x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (64 + (4*x4)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (65 + (4*x4)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (66 + (4*x4)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (67 + (4*x4)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (64 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (65 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (66 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (67 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-09 tmp14 = tmp12 + tmp13 tmp16 = tmp15 * tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp26 + tmp13 tmp28 = tmp14 * tmp27 tmp29 = tmp0 / tmp28 tl.store(in_out_ptr0 + (x3), tmp29, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ig/ciglo32lpxefzh6pphhumvkwdhe2hlwwfmcdza4qfdmba3wyiazs.py # Topologically Sorted Source Nodes: [a_denom_2, b_denom_2, mul_2, sim_9], Original ATen: [aten.add, aten.mul, aten.div] # Source node to ATen node mapping: # a_denom_2 => add_4 # b_denom_2 => add_5 # mul_2 => mul_2 # sim_9 => div_2 # Graph fragment: # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_8, 1e-09), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_9, 1e-09), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_4, %add_5), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm_2, %mul_2), kwargs = {}) triton_poi_fused_add_div_mul_2 = async_compile.triton('triton_poi_fused_add_div_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_2', 
'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = (xindex // 4) x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (128 + (4*x4)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (129 + (4*x4)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (130 + (4*x4)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (131 + (4*x4)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (128 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (129 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (130 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (131 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-09 tmp14 = tmp12 + tmp13 tmp16 = tmp15 * tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp26 + tmp13 tmp28 = tmp14 * tmp27 tmp29 = tmp0 / tmp28 tl.store(in_out_ptr0 + (x3), tmp29, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ql/cql7orlq7zxffgocedy5ohfpyddboisasznvye6fjkfq25mxofj6.py # Topologically Sorted Source Nodes: [a_denom_3, b_denom_3, mul_3, sim_13], Original ATen: [aten.add, aten.mul, aten.div] # Source node to ATen node mapping: # a_denom_3 => add_6 # b_denom_3 => add_7 # mul_3 => mul_3 # sim_13 => div_3 # Graph fragment: # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_12, 1e-09), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_13, 1e-09), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_6, %add_7), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm_3, %mul_3), kwargs = {}) triton_poi_fused_add_div_mul_3 = async_compile.triton('triton_poi_fused_add_div_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, 
major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = (xindex // 4) x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (192 + (4*x4)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (193 + (4*x4)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (194 + (4*x4)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (195 + (4*x4)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (192 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (193 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (194 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (195 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-09 tmp14 = tmp12 + tmp13 tmp16 = tmp15 * tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp26 + tmp13 tmp28 = tmp14 * tmp27 tmp29 = tmp0 / tmp28 tl.store(in_out_ptr0 + (x3), tmp29, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/vv/cvv2ti6slkvzgivnm3mq7yh7uljpvgnxqf7osgl7x2objk2vfw5z.py # Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack] # Source node to ATen node mapping: # stack => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%where_1, %where_3, %where_5, %where_7], 1), kwargs = {}) triton_poi_fused_stack_4 = async_compile.triton('triton_poi_fused_stack_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 
'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 16 x0 = xindex % 4 x2 = (xindex // 64) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = -1.0 tmp7 = tmp5 == tmp6 tmp8 = tl.load(in_ptr1 + ((4*x2) + x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp8 == tmp6 tmp10 = tl.load(in_ptr2 + (x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0) tmp11 = 0.0 tmp12 = tl.where(tmp9, tmp11, tmp10) tmp13 = tl.where(tmp7, tmp11, tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp4, tmp13, tmp14) tmp16 = tmp0 >= tmp3 tmp17 = tl.full([1], 8, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr0 + (x0 + (4*x2)), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tmp20 == tmp6 tmp22 = tl.load(in_ptr1 + ((4*x2) + ((-4) + x1)), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tmp22 == tmp6 tmp24 = tl.load(in_ptr3 + (x0 + (4*((-4) + x1)) + (16*x2)), tmp19 & xmask, other=0.0) tmp25 = tl.where(tmp23, tmp11, tmp24) tmp26 = tl.where(tmp21, tmp11, tmp25) tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp19, tmp26, tmp27) tmp29 = tmp0 >= tmp17 tmp30 = tl.full([1], 12, tl.int64) tmp31 = tmp0 < tmp30 tmp32 = tmp29 & tmp31 tmp33 = tl.load(in_ptr0 + (x0 + (4*x2)), tmp32 & xmask, eviction_policy='evict_last', other=0.0) tmp34 = tmp33 == tmp6 tmp35 = tl.load(in_ptr1 + ((4*x2) + ((-8) + x1)), tmp32 & xmask, eviction_policy='evict_last', other=0.0) tmp36 = tmp35 == tmp6 tmp37 = tl.load(in_ptr4 + (x0 + (4*((-8) + x1)) + (16*x2)), tmp32 & xmask, other=0.0) tmp38 = tl.where(tmp36, tmp11, tmp37) tmp39 = tl.where(tmp34, tmp11, tmp38) tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype) tmp41 = tl.where(tmp32, tmp39, tmp40) tmp42 = tmp0 >= tmp30 tmp43 = tl.full([1], 16, tl.int64) tmp44 = tmp0 < tmp43 tmp45 = tl.load(in_ptr0 + (x0 + (4*x2)), tmp42 & xmask, eviction_policy='evict_last', other=0.0) tmp46 = tmp45 == tmp6 tmp47 = tl.load(in_ptr1 + ((4*x2) + ((-12) + x1)), tmp42 & xmask, eviction_policy='evict_last', other=0.0) tmp48 = tmp47 == tmp6 tmp49 = tl.load(in_ptr5 + (x0 + (4*((-12) + x1)) + (16*x2)), tmp42 & xmask, other=0.0) tmp50 = tl.where(tmp48, tmp11, tmp49) tmp51 = tl.where(tmp46, tmp11, tmp50) tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype) tmp53 = tl.where(tmp42, tmp51, tmp52) tmp54 = tl.where(tmp32, tmp41, tmp53) tmp55 = tl.where(tmp19, tmp28, tmp54) tmp56 = tl.where(tmp4, tmp15, tmp55) 
tl.store(out_ptr0 + (x3), tmp56, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(arg3_1, (4, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sim], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 1, 4), 0), out=buf0) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [a_denom, b_denom, mul, sim_1], Original ATen: [aten.add, aten.mul, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_0.run(buf1, arg0_1, arg1_1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sim_4], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 4), (16, 4, 1), 64), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 1, 4), 64), out=buf2) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [a_denom_1, b_denom_1, mul_1, sim_5], Original ATen: [aten.add, aten.mul, aten.div] triton_poi_fused_add_div_mul_1.run(buf3, arg0_1, arg1_1, 64, grid=grid(64), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sim_8], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 4), (16, 4, 1), 128), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 1, 4), 128), out=buf4) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [a_denom_2, b_denom_2, mul_2, sim_9], Original ATen: [aten.add, aten.mul, aten.div] triton_poi_fused_add_div_mul_2.run(buf5, arg0_1, arg1_1, 64, grid=grid(64), stream=stream0) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sim_12], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 4), (16, 4, 1), 192), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 1, 4), 192), out=buf6) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [a_denom_3, b_denom_3, mul_3, sim_13], Original ATen: [aten.add, aten.mul, aten.div] triton_poi_fused_add_div_mul_3.run(buf7, arg0_1, arg1_1, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 buf8 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack] triton_poi_fused_stack_4.run(arg3_1, arg2_1, buf1, buf3, buf5, buf7, buf8, 256, grid=grid(256), stream=stream0) del arg2_1 del arg3_1 del buf1 del buf3 del buf5 del buf7 return (reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if 
__name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class SimmatModule(torch.nn.Module): def __init__(self, padding=-1): super().__init__() self.padding = padding self._hamming_index_loaded = None self._hamming_index = None def forward(self, query_embed, doc_embed, query_tok, doc_tok): simmat = [] for a_emb, b_emb in zip(query_embed, doc_embed): BAT, A, B = a_emb.shape[0], a_emb.shape[1], b_emb.shape[1] a_denom = a_emb.norm(p=2, dim=2).reshape(BAT, A, 1).expand(BAT, A, B) + 1e-09 b_denom = b_emb.norm(p=2, dim=2).reshape(BAT, 1, B).expand(BAT, A, B) + 1e-09 perm = b_emb.permute(0, 2, 1) sim = a_emb.bmm(perm) sim = sim / (a_denom * b_denom) nul = torch.zeros_like(sim) sim = torch.where(query_tok.reshape(BAT, A, 1).expand(BAT, A, B ) == self.padding, nul, sim) sim = torch.where(doc_tok.reshape(BAT, 1, B).expand(BAT, A, B) == self.padding, nul, sim) simmat.append(sim) return torch.stack(simmat, dim=1) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 1]), torch.rand([4, 1, 4])] def get_init_inputs(): return [[], {}]
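To make the semantics concrete: each per-layer similarity above is a cosine similarity in which 1e-09 is added to each norm before the two are multiplied, and positions whose token equals the padding value are zeroed. A small illustrative check, assuming the SimmatModule class above is in scope (tokens drawn from [0, 1) never equal the padding value -1, so the masking stays inactive here):

import torch

m = SimmatModule(padding=-1)
q_emb = torch.rand(4, 4, 4, 4)  # iterated over dim 0: one (BAT, A, dim) slice per layer
d_emb = torch.rand(4, 4, 4, 4)
q_tok = torch.rand(4, 4, 1)
d_tok = torch.rand(4, 1, 4)
simmat = m(q_emb, d_emb, q_tok, d_tok)  # (BAT, layers, A, B) after stack(dim=1)

a, b = q_emb[0], d_emb[0]  # layer 0
num = a.bmm(b.permute(0, 2, 1))
den = (a.norm(dim=2, keepdim=True) + 1e-09) * (b.norm(dim=2).unsqueeze(1) + 1e-09)
assert torch.allclose(simmat[:, 0], num / den)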
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + 4 * x4, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x4), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x4), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x4), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr1 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-09 tmp14 = tmp12 + tmp13 tmp16 = tmp15 * tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp26 + tmp13 tmp28 = tmp14 * tmp27 tmp29 = tmp0 / tmp28 tl.store(in_out_ptr0 + x3, tmp29, xmask) @triton.jit def triton_poi_fused_add_div_mul_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (64 + 4 * x4), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (65 + 4 * x4), xmask, eviction_policy='evict_last' ) tmp6 = tl.load(in_ptr0 + (66 + 4 * x4), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (67 + 4 * x4), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + (64 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (65 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (66 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (67 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-09 tmp14 = tmp12 + tmp13 tmp16 = tmp15 * tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp26 + tmp13 tmp28 = tmp14 * tmp27 tmp29 = tmp0 / tmp28 tl.store(in_out_ptr0 + x3, tmp29, xmask) @triton.jit def triton_poi_fused_add_div_mul_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): 
xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (128 + 4 * x4), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (129 + 4 * x4), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (130 + 4 * x4), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (131 + 4 * x4), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr1 + (128 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (129 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (130 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (131 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-09 tmp14 = tmp12 + tmp13 tmp16 = tmp15 * tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp26 + tmp13 tmp28 = tmp14 * tmp27 tmp29 = tmp0 / tmp28 tl.store(in_out_ptr0 + x3, tmp29, xmask) @triton.jit def triton_poi_fused_add_div_mul_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (192 + 4 * x4), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (193 + 4 * x4), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (194 + 4 * x4), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (195 + 4 * x4), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr1 + (192 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (193 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (194 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (195 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-09 tmp14 = tmp12 + tmp13 tmp16 = tmp15 * tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp26 + tmp13 tmp28 = tmp14 * tmp27 tmp29 = tmp0 / tmp28 tl.store(in_out_ptr0 + x3, tmp29, xmask) @triton.jit def triton_poi_fused_stack_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 16 x0 = xindex % 4 x2 = xindex // 64 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = -1.0 tmp7 = tmp5 == tmp6 tmp8 = tl.load(in_ptr1 + (4 * x2 + x1), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp9 = tmp8 == tmp6 tmp10 = tl.load(in_ptr2 + (x0 
+ 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp11 = 0.0 tmp12 = tl.where(tmp9, tmp11, tmp10) tmp13 = tl.where(tmp7, tmp11, tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp4, tmp13, tmp14) tmp16 = tmp0 >= tmp3 tmp17 = tl.full([1], 8, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp19 & xmask, eviction_policy ='evict_last', other=0.0) tmp21 = tmp20 == tmp6 tmp22 = tl.load(in_ptr1 + (4 * x2 + (-4 + x1)), tmp19 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tmp22 == tmp6 tmp24 = tl.load(in_ptr3 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp19 & xmask, other=0.0) tmp25 = tl.where(tmp23, tmp11, tmp24) tmp26 = tl.where(tmp21, tmp11, tmp25) tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp19, tmp26, tmp27) tmp29 = tmp0 >= tmp17 tmp30 = tl.full([1], 12, tl.int64) tmp31 = tmp0 < tmp30 tmp32 = tmp29 & tmp31 tmp33 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp32 & xmask, eviction_policy ='evict_last', other=0.0) tmp34 = tmp33 == tmp6 tmp35 = tl.load(in_ptr1 + (4 * x2 + (-8 + x1)), tmp32 & xmask, eviction_policy='evict_last', other=0.0) tmp36 = tmp35 == tmp6 tmp37 = tl.load(in_ptr4 + (x0 + 4 * (-8 + x1) + 16 * x2), tmp32 & xmask, other=0.0) tmp38 = tl.where(tmp36, tmp11, tmp37) tmp39 = tl.where(tmp34, tmp11, tmp38) tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype) tmp41 = tl.where(tmp32, tmp39, tmp40) tmp42 = tmp0 >= tmp30 tl.full([1], 16, tl.int64) tmp45 = tl.load(in_ptr0 + (x0 + 4 * x2), tmp42 & xmask, eviction_policy ='evict_last', other=0.0) tmp46 = tmp45 == tmp6 tmp47 = tl.load(in_ptr1 + (4 * x2 + (-12 + x1)), tmp42 & xmask, eviction_policy='evict_last', other=0.0) tmp48 = tmp47 == tmp6 tmp49 = tl.load(in_ptr5 + (x0 + 4 * (-12 + x1) + 16 * x2), tmp42 & xmask, other=0.0) tmp50 = tl.where(tmp48, tmp11, tmp49) tmp51 = tl.where(tmp46, tmp11, tmp50) tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype) tmp53 = tl.where(tmp42, tmp51, tmp52) tmp54 = tl.where(tmp32, tmp41, tmp53) tmp55 = tl.where(tmp19, tmp28, tmp54) tmp56 = tl.where(tmp4, tmp15, tmp55) tl.store(out_ptr0 + x3, tmp56, xmask) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(arg3_1, (4, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 1, 4), 0), out=buf0) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_div_mul_0[grid(64)](buf1, arg0_1, arg1_1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 4), (16, 4, 1), 64), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 1, 4), 64), out =buf2) buf3 = buf2 del buf2 triton_poi_fused_add_div_mul_1[grid(64)](buf3, arg0_1, arg1_1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 4), (16, 4, 1), 128), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 1, 4), 128), out=buf4) buf5 = buf4 del buf4 triton_poi_fused_add_div_mul_2[grid(64)](buf5, arg0_1, arg1_1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 4), 
(16, 4, 1), 192), reinterpret_tensor(arg1_1, (4, 4, 4), (16, 1, 4), 192), out=buf6) buf7 = buf6 del buf6 triton_poi_fused_add_div_mul_3[grid(64)](buf7, arg0_1, arg1_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf8 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) triton_poi_fused_stack_4[grid(256)](arg3_1, arg2_1, buf1, buf3, buf5, buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg2_1 del arg3_1 del buf1 del buf3 del buf5 del buf7 return reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0), class SimmatModuleNew(torch.nn.Module): def __init__(self, padding=-1): super().__init__() self.padding = padding self._hamming_index_loaded = None self._hamming_index = None def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
gitter-badger/FlexNeuART
SimmatModule
false
15447
[ "Apache-2.0" ]
101
f69e5421bdebe9db0d993b5470dace61872f90df
https://github.com/gitter-badger/FlexNeuART/tree/f69e5421bdebe9db0d993b5470dace61872f90df
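The four triton_poi_fused_add_div_mul_* kernels in this entry differ only in their hardcoded layer offsets (0, 64, 128, 192); within a layer, each flat index decomposes into (batch, row, col). A plain-Python sketch of that arithmetic, with our own variable names:

for flat in (0, 5, 21, 63):
    batch, row, col = flat // 16, flat // 4 % 4, flat % 4
    # kernel: x2 = flat // 16, x4 = flat // 4 (== batch * 4 + row), x0 = flat % 4
    assert flat == batch * 16 + row * 4 + col
    print(flat, '->', (batch, row, col))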
NaiveGroupNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/5r/c5rdz5pqjqqsgx62pgljudybkgqw7637rjp23qdmziwn4mfhay4j.py # Topologically Sorted Source Nodes: [mean, pow_1, mean_1, pow_2, var, add, std, mul, input_4], Original ATen: [aten.mean, aten.pow, aten.sub, aten.add, aten.sqrt, aten.mul] # Source node to ATen node mapping: # add => add # input_4 => add_1 # mean => mean # mean_1 => mean_1 # mul => mul # pow_1 => pow_1 # pow_2 => pow_2 # std => sqrt # var => sub # Graph fragment: # %mean : [num_users=3] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [-1], True), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mean, 2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mean_1, %pow_2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 1e-05), kwargs = {}) # %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %view_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %view_3), kwargs = {}) triton_per_fused_add_mean_mul_pow_sqrt_sub_0 = async_compile.triton('triton_per_fused_add_mean_mul_pow_sqrt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_pow_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp20 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = tmp0 * tmp0 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 64.0 tmp11 = tmp4 / tmp10 tmp12 = tmp9 / tmp10 tmp13 = tmp11 * tmp11 tmp14 = tmp12 - tmp13 tmp15 = 1e-05 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp0 - tmp11 tmp19 = tmp18 / tmp17 tmp21 = tmp19 * tmp20 tmp23 = tmp21 + tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp11, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + (x0), tmp17, xmask) tl.store(out_ptr0 + (r1 + (64*x0)), tmp23, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0); del buf0 # reuse buf3 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0); del buf2 # reuse buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, pow_1, mean_1, pow_2, var, add, std, mul, input_4], Original ATen: [aten.mean, aten.pow, aten.sub, aten.add, aten.sqrt, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_add_mean_mul_pow_sqrt_sub_0.run(buf1, buf3, primals_1, primals_2, primals_3, buf4, 4, 64, grid=grid(4), stream=stream0) del primals_2 del primals_3 return (buf4, primals_1, buf1, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = 
rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch from torch.nn import Parameter from torch.nn import init import torch.nn.parallel class NaiveGroupNorm(Module): """NaiveGroupNorm implements Group Normalization with the high-level matrix operations in PyTorch. It is a temporary solution to export GN by ONNX before the official GN can be exported by ONNX. The usage of NaiveGroupNorm is exactly the same as the official :class:`torch.nn.GroupNorm`. Args: num_groups (int): number of groups to separate the channels into num_channels (int): number of channels expected in input eps: a value added to the denominator for numerical stability. Default: 1e-5 affine: a boolean value that when set to ``True``, this module has learnable per-channel affine parameters initialized to ones (for weights) and zeros (for biases). Default: ``True``. Shape: - Input: :math:`(N, C, *)` where :math:`C=\\text{num\\_channels}` - Output: :math:`(N, C, *)` (same shape as input) Examples:: >>> input = torch.randn(20, 6, 10, 10) >>> # Separate 6 channels into 3 groups >>> m = NaiveGroupNorm(3, 6) >>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm) >>> m = NaiveGroupNorm(6, 6) >>> # Put all 6 channels into a single group (equivalent with LayerNorm) >>> m = NaiveGroupNorm(1, 6) >>> # Activating the module >>> output = m(input) .. _`Group Normalization`: https://arxiv.org/abs/1803.08494 """ __constants__ = ['num_groups', 'num_channels', 'eps', 'affine', 'weight', 'bias'] def __init__(self, num_groups, num_channels, eps=1e-05, affine=True): super(NaiveGroupNorm, self).__init__() self.num_groups = num_groups self.num_channels = num_channels self.eps = eps self.affine = affine if self.affine: self.weight = Parameter(torch.Tensor(num_channels)) self.bias = Parameter(torch.Tensor(num_channels)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): if self.affine: init.ones_(self.weight) init.zeros_(self.bias) def forward(self, input): N, C, H, W = input.size() assert C % self.num_groups == 0 input = input.reshape(N, self.num_groups, -1) mean = input.mean(dim=-1, keepdim=True) var = (input ** 2).mean(dim=-1, keepdim=True) - mean ** 2 std = torch.sqrt(var + self.eps) input = (input - mean) / std input = input.reshape(N, C, H, W) if self.affine: input = input * self.weight.reshape(1, C, 1, 1 ) + self.bias.reshape(1, C, 1, 1) return input def extra_repr(self): return ('{num_groups}, {num_channels}, eps={eps}, affine={affine}'. format(**self.__dict__)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_groups': 1, 'num_channels': 4}]
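Because NaiveGroupNorm uses the biased variance E[x^2] - mean^2 and the same default eps as the official layer, it should agree with torch.nn.GroupNorm up to floating-point noise. An illustrative check (our code, not from the repository):

import torch

x = torch.randn(4, 4, 4, 4)
naive = NaiveGroupNorm(num_groups=1, num_channels=4)
official = torch.nn.GroupNorm(num_groups=1, num_channels=4)
print(torch.allclose(naive(x), official(x), atol=1e-5))  # expected: True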
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module from torch.nn import Parameter from torch.nn import init import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp20 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = tmp0 * tmp0 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 64.0 tmp11 = tmp4 / tmp10 tmp12 = tmp9 / tmp10 tmp13 = tmp11 * tmp11 tmp14 = tmp12 - tmp13 tmp15 = 1e-05 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp0 - tmp11 tmp19 = tmp18 / tmp17 tmp21 = tmp19 * tmp20 tmp23 = tmp21 + tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp11, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp17, xmask) tl.store(out_ptr0 + (r1 + 64 * x0), tmp23, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0) del buf0 buf3 = reinterpret_tensor(buf2, (4, 1, 1), (1, 1, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_mean_mul_pow_sqrt_sub_0[grid(4)](buf1, buf3, primals_1, primals_2, primals_3, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_2 del primals_3 return buf4, primals_1, buf1, buf3 class NaiveGroupNormNew(Module): """NaiveGroupNorm implements Group Normalization with the high-level matrix operations in PyTorch. It is a temporary solution to export GN by ONNX before the official GN can be exported by ONNX. The usage of NaiveGroupNorm is exactly the same as the official :class:`torch.nn.GroupNorm`. Args: num_groups (int): number of groups to separate the channels into num_channels (int): number of channels expected in input eps: a value added to the denominator for numerical stability. Default: 1e-5 affine: a boolean value that when set to ``True``, this module has learnable per-channel affine parameters initialized to ones (for weights) and zeros (for biases). Default: ``True``. 
Shape: - Input: :math:`(N, C, *)` where :math:`C=\\text{num\\_channels}` - Output: :math:`(N, C, *)` (same shape as input) Examples:: >>> input = torch.randn(20, 6, 10, 10) >>> # Separate 6 channels into 3 groups >>> m = NaiveGroupNorm(3, 6) >>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm) >>> m = NaiveGroupNorm(6, 6) >>> # Put all 6 channels into a single group (equivalent with LayerNorm) >>> m = NaiveGroupNorm(1, 6) >>> # Activating the module >>> output = m(input) .. _`Group Normalization`: https://arxiv.org/abs/1803.08494 """ __constants__ = ['num_groups', 'num_channels', 'eps', 'affine', 'weight', 'bias'] def __init__(self, num_groups, num_channels, eps=1e-05, affine=True): super(NaiveGroupNormNew, self).__init__() self.num_groups = num_groups self.num_channels = num_channels self.eps = eps self.affine = affine if self.affine: self.weight = Parameter(torch.Tensor(num_channels)) self.bias = Parameter(torch.Tensor(num_channels)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): if self.affine: init.ones_(self.weight) init.zeros_(self.bias) def extra_repr(self): return ('{num_groups}, {num_channels}, eps={eps}, affine={affine}'. format(**self.__dict__)) def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
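# A hedged usage sketch (added for illustration; assumes a CUDA device and
# that the eager NaiveGroupNorm defined in the previous listing is available
# in scope). The generated kernel is specialized to the configuration from
# get_inputs()/get_init_inputs(), i.e. num_groups=1, num_channels=4 and a
# (4, 4, 4, 4) input, so the compiled module should only be compared with the
# eager one on exactly that shape.
def _check_naive_groupnorm_compiled_parity():
    if not torch.cuda.is_available():
        return
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = NaiveGroupNorm(1, 4).cuda()
    compiled = NaiveGroupNormNew(1, 4).cuda()
    assert torch.allclose(eager(x), compiled(x), atol=1e-05)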
gist-ailab/uoais
NaiveGroupNorm
false
15,448
[ "BSD-2-Clause" ]
52
fb42d9a96cd54daad61c956d8d9d65dd0ebef4c7
https://github.com/gist-ailab/uoais/tree/fb42d9a96cd54daad61c956d8d9d65dd0ebef4c7
FocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/fw/cfwkei3isw2pa2xamwapcpaqiplvhdz4zywym425thozidzx67qh.py # Topologically Sorted Source Nodes: [logits], Original ATen: [aten._softmax] # Source node to ATen node mapping: # logits => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = 
tl.full([XBLOCK], True, tl.int1) x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (4*x1), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), None, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6x/c6xwmwqiowkgjifetryiou7grm3j3yh47xzqxfhbfn6iod2x53ci.py # Topologically Sorted Source Nodes: [logits], Original ATen: [aten._softmax] # Source node to ATen node mapping: # logits => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (4*x1), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/hn/chny2er4wobzy5w7o5w6rfshqgzq6kjifapigwwqu5pzmqi464o4.py # Topologically Sorted Source Nodes: [idx, one_hot_key_1, mul, sub, pow_1, mul_1, add, log, loss, loss_1, mean], Original ATen: 
[aten._to_copy, aten.scatter, aten.mul, aten.rsub, aten.pow, aten.add, aten.log, aten.sum, aten.mean] # Source node to ATen node mapping: # add => add # idx => convert_element_type # log => log # loss => mul_2 # loss_1 => sum_2 # mean => mean # mul => mul # mul_1 => mul_1 # one_hot_key_1 => scatter_upon_const_tensor # pow_1 => pow_1 # sub => sub_1 # Graph fragment: # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {}) # %scatter_upon_const_tensor : [num_users=1] = call_function[target=torch._inductor.fx_passes.post_grad.scatter_upon_const_tensor](args = (), kwargs = {shape: [256, 4], background_val: 0, dtype: torch.float32, dim: 1, selector: %convert_element_type, val: 1}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%scatter_upon_const_tensor, -0.25), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %pow_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 1e-09), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %log), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [1]), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_2,), kwargs = {}) triton_red_fused__to_copy_add_log_mean_mul_pow_rsub_scatter_sum_2 = async_compile.triton('triton_red_fused__to_copy_add_log_mean_mul_pow_rsub_scatter_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[1, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__to_copy_add_log_mean_mul_pow_rsub_scatter_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused__to_copy_add_log_mean_mul_pow_rsub_scatter_sum_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, 
rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 1 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp42 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = (rindex // 4) % 256 r0 = rindex % 4 r2 = (rindex // 1024) r4 = rindex % 1024 r3 = rindex tmp0 = tl.load(in_ptr0 + (r1), rmask, eviction_policy='evict_last', other=0.0) tmp9 = tl.load(in_ptr1 + (r4 + (4096*r2)), rmask, eviction_policy='evict_first', other=0.0) tmp17 = tl.load(in_ptr1 + (1024 + r4 + (4096*r2)), rmask, eviction_policy='evict_first', other=0.0) tmp25 = tl.load(in_ptr1 + (2048 + r4 + (4096*r2)), rmask, eviction_policy='evict_first', other=0.0) tmp33 = tl.load(in_ptr1 + (3072 + r4 + (4096*r2)), rmask, eviction_policy='evict_first', other=0.0) tmp1 = tmp0.to(tl.int64) tmp2 = r0 tmp3 = tmp1 == tmp2 tmp4 = 1.0 tmp5 = 0.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = -0.25 tmp8 = tmp6 * tmp7 tmp10 = tmp4 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tmp8 * tmp11 tmp13 = 1e-09 tmp14 = tmp9 + tmp13 tmp15 = tl_math.log(tmp14) tmp16 = tmp12 * tmp15 tmp18 = tmp4 - tmp17 tmp19 = tmp18 * tmp18 tmp20 = tmp8 * tmp19 tmp21 = tmp17 + tmp13 tmp22 = tl_math.log(tmp21) tmp23 = tmp20 * tmp22 tmp24 = tmp16 + tmp23 tmp26 = tmp4 - tmp25 tmp27 = tmp26 * tmp26 tmp28 = tmp8 * tmp27 tmp29 = tmp25 + tmp13 tmp30 = tl_math.log(tmp29) tmp31 = tmp28 * tmp30 tmp32 = tmp24 + tmp31 tmp34 = tmp4 - tmp33 tmp35 = tmp34 * tmp34 tmp36 = tmp8 * tmp35 tmp37 = tmp33 + tmp13 tmp38 = tl_math.log(tmp37) tmp39 = tmp36 * tmp38 tmp40 = tmp32 + tmp39 tmp41 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK]) tmp43 = _tmp42 + tmp41 _tmp42 = tl.where(rmask, tmp43, _tmp42) tmp42 = tl.sum(_tmp42, 1)[:, None] tmp44 = 4096.0 tmp45 = tmp42 / tmp44 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp45, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 256, 4), (4096, 1024, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 256, 4), (4096, 1024, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [logits], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg1_1, buf0, 16384, grid=grid(16384), stream=stream0) del arg1_1 buf1 = empty_strided_cuda((4, 4, 256, 4), (4096, 1024, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [logits], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf0, buf1, 16384, grid=grid(16384), stream=stream0) del buf0 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [idx, one_hot_key_1, mul, sub, pow_1, mul_1, add, log, loss, loss_1, mean], Original ATen: [aten._to_copy, aten.scatter, aten.mul, aten.rsub, aten.pow, aten.add, aten.log, aten.sum, aten.mean] triton_red_fused__to_copy_add_log_mean_mul_pow_rsub_scatter_sum_2.run(buf4, arg0_1, buf1, 1, 4096, grid=grid(1), stream=stream0) del arg0_1 del buf1 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = 
rand_strided((4, 4, 256, 4), (4096, 1024, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class FocalLoss(nn.Module):
    """
    Softmax and sigmoid focal loss.
    copy from https://github.com/lonePatient/TorchBlocks
    """

    def __init__(self, num_labels, activation_type='softmax', gamma=2.0,
        alpha=0.25, epsilon=1e-09):
        super(FocalLoss, self).__init__()
        self.num_labels = num_labels
        self.gamma = gamma
        self.alpha = alpha
        self.epsilon = epsilon
        self.activation_type = activation_type

    def forward(self, input, target):
        """
        Args:
            input: model's output logits, shape of [batch_size, num_cls]
            target: ground truth labels; class indices of shape [batch_size]
                for 'softmax', multi-hot labels for 'sigmoid'

        Returns:
            scalar mean loss over the batch
        """
        if self.activation_type == 'softmax':
            idx = target.view(-1, 1).long()
            one_hot_key = torch.zeros(idx.size(0), self.num_labels, dtype=
                torch.float32, device=idx.device)
            one_hot_key = one_hot_key.scatter_(1, idx, 1)
            logits = torch.softmax(input, dim=-1)
            loss = -self.alpha * one_hot_key * torch.pow(1 - logits, self.
                gamma) * (logits + self.epsilon).log()
            loss = loss.sum(1)
        elif self.activation_type == 'sigmoid':
            multi_hot_key = target
            logits = torch.sigmoid(input)
            zero_hot_key = 1 - multi_hot_key
            loss = -self.alpha * multi_hot_key * torch.pow(1 - logits, self
                .gamma) * (logits + self.epsilon).log()
            loss += -(1 - self.alpha) * zero_hot_key * torch.pow(logits,
                self.gamma) * (1 - logits + self.epsilon).log()
        else:
            # Guard against silently returning an undefined `loss` for an
            # unknown activation type.
            raise ValueError(
                f'unsupported activation_type: {self.activation_type}')
        return loss.mean()


def get_inputs():
    return [torch.rand([4, 4, 256, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_labels': 4}]
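# A quick sanity sketch (added for illustration): with gamma=0 the modulating
# factor (1 - p)^gamma collapses to 1, and with alpha=1 the softmax branch
# reduces to plain cross entropy, up to the epsilon inside the log.
def _check_focal_reduces_to_ce():
    import torch.nn.functional as F
    logits = torch.randn(8, 4)
    target = torch.randint(0, 4, (8,))
    focal = FocalLoss(num_labels=4, gamma=0.0, alpha=1.0)
    assert torch.allclose(focal(logits, target), F.cross_entropy(logits,
        target), atol=1e-05)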
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + 4 * x1, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), None, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, None) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + 4 * x1, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, None) @triton.jit def triton_red_fused__to_copy_add_log_mean_mul_pow_rsub_scatter_sum_2( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp42 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex // 4 % 256 r0 = rindex % 4 r2 = rindex // 1024 r4 = rindex % 1024 tmp0 = tl.load(in_ptr0 + r1, rmask, eviction_policy='evict_last', other=0.0) tmp9 = tl.load(in_ptr1 + (r4 + 4096 * r2), rmask, eviction_policy= 'evict_first', other=0.0) tmp17 = tl.load(in_ptr1 + (1024 + r4 + 4096 * r2), rmask, eviction_policy='evict_first', other=0.0) tmp25 = tl.load(in_ptr1 + (2048 + r4 + 4096 * r2), rmask, eviction_policy='evict_first', other=0.0) tmp33 = tl.load(in_ptr1 + (3072 + r4 + 4096 * r2), rmask, eviction_policy='evict_first', other=0.0) tmp1 = tmp0.to(tl.int64) tmp2 = r0 tmp3 = tmp1 == tmp2 tmp4 = 1.0 tmp5 = 0.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = -0.25 tmp8 = tmp6 * tmp7 tmp10 = tmp4 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tmp8 * tmp11 tmp13 = 1e-09 tmp14 = tmp9 + tmp13 tmp15 = tl_math.log(tmp14) tmp16 = tmp12 * tmp15 tmp18 = tmp4 - tmp17 tmp19 = tmp18 * tmp18 tmp20 = tmp8 * tmp19 tmp21 = tmp17 + tmp13 tmp22 = tl_math.log(tmp21) tmp23 = tmp20 * tmp22 tmp24 = tmp16 + tmp23 tmp26 = tmp4 - tmp25 tmp27 = tmp26 * tmp26 tmp28 = tmp8 * tmp27 tmp29 = tmp25 + tmp13 tmp30 = tl_math.log(tmp29) tmp31 = tmp28 * tmp30 tmp32 = tmp24 + tmp31 tmp34 = tmp4 - tmp33 
tmp35 = tmp34 * tmp34 tmp36 = tmp8 * tmp35 tmp37 = tmp33 + tmp13 tmp38 = tl_math.log(tmp37) tmp39 = tmp36 * tmp38 tmp40 = tmp32 + tmp39 tmp41 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK]) tmp43 = _tmp42 + tmp41 _tmp42 = tl.where(rmask, tmp43, _tmp42) tmp42 = tl.sum(_tmp42, 1)[:, None] tmp44 = 4096.0 tmp45 = tmp42 / tmp44 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp45, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 256, 4), (4096, 1024, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 256, 4), (4096, 1024, 4, 1), torch .float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(16384)](arg1_1, buf0, 16384, XBLOCK=128, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((4, 4, 256, 4), (4096, 1024, 4, 1), torch .float32) triton_poi_fused__softmax_1[grid(16384)](buf0, buf1, 16384, XBLOCK= 256, num_warps=4, num_stages=1) del buf0 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_red_fused__to_copy_add_log_mean_mul_pow_rsub_scatter_sum_2[grid (1)](buf4, arg0_1, buf1, 1, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) del arg0_1 del buf1 return buf4, class FocalLossNew(nn.Module): """ Softmax and sigmoid focal loss. copy from https://github.com/lonePatient/TorchBlocks """ def __init__(self, num_labels, activation_type='softmax', gamma=2.0, alpha=0.25, epsilon=1e-09): super(FocalLossNew, self).__init__() self.num_labels = num_labels self.gamma = gamma self.alpha = alpha self.epsilon = epsilon self.activation_type = activation_type def forward(self, input_0, input_1): arg1_1 = input_0 arg0_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
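# A hedged parity sketch (added for illustration; assumes a CUDA device and
# that the eager FocalLoss from the previous listing is in scope). The
# generated kernel bakes in the defaults: softmax activation, alpha=0.25
# (visible as the -0.25 constant), gamma=2 (the squared term) and
# epsilon=1e-09, and it is specialized to the shapes from get_inputs(), so
# FocalLossNew is only interchangeable with FocalLoss under exactly that
# configuration.
def _check_focal_compiled_parity():
    if not torch.cuda.is_available():
        return
    inp = torch.rand(4, 4, 256, 4, device='cuda')
    tgt = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(FocalLoss(num_labels=4)(inp, tgt), FocalLossNew(
        num_labels=4)(inp, tgt), atol=1e-04)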
gitabtion/BertBasedCscModels
FocalLoss
false
15,449
[ "Apache-2.0" ]
158
1daf505d109c5922eeedb6674edbb1b73db21e45
https://github.com/gitabtion/BertBasedCscModels/tree/1daf505d109c5922eeedb6674edbb1b73db21e45
LinearClassifier
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/nr/cnrkptzsuv7qm3ss6i6xgoxkou23z76h2vmwqkwz2zkgpdbxhedc.py # Topologically Sorted Source Nodes: [logits_1], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # logits_1 => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = 
tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/32/c32vfxouqe74ea5scuzrdhpd7r6adxwu4bzarm4icjfnb47jbizg.py # Topologically Sorted Source Nodes: [logits_1], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # logits_1 => exp, log, sub_1, sum_1 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [logits_1], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [logits_1], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0) del buf1 return (buf2, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import logging
import random
import torch
import torch.nn.functional as F
import torch.nn as nn
from typing import List
import torch.onnx.operators
from functools import wraps


def singleton(cls):
    """
    Singleton decorator

    Args:
        cls: singleton class

    Returns:
        - an instance of a singleton class
    """
    instances = {}

    @wraps(cls)
    def getinstance(*args, **kwargs):
        if cls not in instances:
            instances[cls] = cls(*args, **kwargs)
        return instances[cls]
    return getinstance


def get_invalid_class_mask(classes: 'int', invalid_classes: 'List'):
    """
    Create mask for invalid classes

    Args:
        classes: number of labels
        invalid_classes: invalid class list

    Returns:
        - mask for invalid class
          :math:`(1, L)` where L is the number of classes
    """
    invalid_class_mask = torch.zeros(classes).bool()
    if invalid_classes:
        for idx in invalid_classes:
            invalid_class_mask[idx] = True
    invalid_class_mask = invalid_class_mask.unsqueeze(dim=0)
    env = Environment()
    if env.device.startswith('cuda'):
        invalid_class_mask = invalid_class_mask.cuda()  # presumed intent of the original self-assignment
    return invalid_class_mask


@singleton
class Environment:
    """
    Environment is a running environment class.

    Args:
        profiling_window: profiling window size
        configs: configs for running tasks
        debug: running with debug information
        no_warning: do not output warning information
        seed: initial seed for random and torch
        device: running device
        fp16: running with fp16
        no_progress_bar: do not show progress bar
        pb_interval: show progress bar with an interval
    """

    def __init__(self, configs=None, profiling_window: 'int'=0, debug:
        'bool'=False, no_warning: 'bool'=False, seed: 'int'=0, device:
        'str'=None, fp16: 'bool'=False, no_progress_bar: 'bool'=False,
        pb_interval: 'int'=1, custom_libs: 'str'=None):
        self.profiling_window = profiling_window
        self.configs = configs
        self.debug = debug
        self.no_warning = no_warning
        self.seed = seed
        self.fp16 = fp16
        self.no_progress_bar = no_progress_bar
        self.pb_interval = pb_interval
        self.distributed_world = 1
        self.rank = 0
        self.local_rank = 0
        if device is None:
            if torch.cuda.is_available():
                self.device = 'cuda'
            else:
                self.device = 'cpu'
        else:
            self.device = device
        if self.device == 'cuda':
            self._init_cuda()
        self._init_log()
        self._init_seed()
        self._import_custom_lib(custom_libs)

    def _init_log(self):
        FORMAT = (
            f"%(asctime)s | %(levelname)s | %(name)s |{f' RANK {self.rank} | ' if not self.is_master() else ' '}%(message)s"
            )
        logging.basicConfig(format=FORMAT, datefmt='%Y-%m-%d,%H:%M:%S',
            level=logging.INFO)
        if not self.is_master():
            logging.disable(logging.INFO)

    def _import_custom_lib(self, path):
        """
        Import library manually

        Args:
            path: external libraries split with `,`
        """
        if path:
            import importlib
            logger = logging.getLogger(__name__)
            path = path.strip('\n')
            for line in path.split(','):
                logger.info(f'import module from {line}')
                line = line.replace('/', '.')
                importlib.import_module(line)

    def _init_cuda(self):
        """
        Initialize cuda device

        We assume that the user will not run ParaGen on more than one worker
        with only 1 GPU used on each worker.
        """
        if torch.cuda.device_count() > 1:
            import horovod.torch as hvd  # assumed dependency; referenced without an import upstream
            hvd.init()
            torch.cuda.set_device(hvd.local_rank())
            self.rank = hvd.rank()
            self.local_rank = hvd.local_rank()
            self.distributed_world = hvd.size()
        torch.cuda.empty_cache()

    def _init_seed(self):
        """
        Initialize global seed
        """
        random.seed(self.seed)
        torch.manual_seed(self.seed)
        if self.device == 'cuda':
            torch.cuda.manual_seed(self.seed)  # presumed intent; upstream repeated torch.manual_seed here

    def is_master(self):
        """ check whether the current process is the master process """
        return self.rank == 0


class LinearClassifier(nn.Module):
    """
    Classifier with only a linear projection.
    Args:
        d_model: feature dimensionality
        labels: number of classes
        invalid_classes (List): classes that are not allowed to be produced
    """

    def __init__(self, d_model, labels, invalid_classes: 'List'=None):
        super().__init__()
        self._linear = nn.Linear(d_model, labels, bias=False)
        self._invalid_class_mask = get_invalid_class_mask(labels,
            invalid_classes) if invalid_classes else None

    def forward(self, x):
        """
        Args:
            x: feature to predict labels
                :math:`(*, D)`, where D is the feature dimension

        Returns:
            - log probability of each class
              :math:`(*, L)`, where L is the number of classes
        """
        logits = self._linear(x)
        if self._invalid_class_mask is not None:
            logits = logits.masked_fill(self._invalid_class_mask,
                float('-inf'))
        logits = F.log_softmax(logits, dim=-1)
        return logits


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'labels': 4}]
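# A short usage sketch (added for illustration): masked classes receive
# probability zero (log-probability -inf) while the remaining classes still
# form a proper distribution. Note that passing invalid_classes instantiates
# the Environment singleton as a side effect, which configures logging, seeds
# the global RNGs, and on multi-GPU hosts attempts to initialize horovod.
def _demo_invalid_class_masking():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    clf = LinearClassifier(d_model=4, labels=4, invalid_classes=[2]).to(device)
    logp = clf(torch.rand(4, 4, 4, 4, device=device))
    assert (logp[..., 2] == float('-inf')).all()
    assert torch.allclose(logp.exp().sum(dim=-1), torch.ones_like(logp[..., 0]))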
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import logging import random import torch.nn as nn from typing import List import torch.onnx.operators from functools import wraps assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused__log_softmax_1[grid(256)](buf1, buf2, 256, XBLOCK= 256, num_warps=4, num_stages=1) del buf1 return buf2, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf2 def singleton(cls): """ Singleton decorator Args: cls: singleton class Returns: - an instance of a singleton class """ instances = {} @wraps(cls) def getinstance(*args, **kwargs): if cls not in instances: instances[cls] = cls(*args, **kwargs) return instances[cls] return getinstance def get_invalid_class_mask(classes: 'int', invalid_classes: 'List'): """ Create mask for invalid classes Args: classes: number of labels 
        invalid_classes: invalid class list

    Returns:
        - mask for invalid class
          :math:`(1, L)` where L is the number of classes
    """
    invalid_class_mask = torch.zeros(classes).bool()
    if invalid_classes:
        for idx in invalid_classes:
            invalid_class_mask[idx] = True
    invalid_class_mask = invalid_class_mask.unsqueeze(dim=0)
    env = Environment()
    if env.device.startswith('cuda'):
        invalid_class_mask = invalid_class_mask.cuda()  # presumed intent of the original self-assignment
    return invalid_class_mask


@singleton
class Environment:
    """
    Environment is a running environment class.

    Args:
        profiling_window: profiling window size
        configs: configs for running tasks
        debug: running with debug information
        no_warning: do not output warning information
        seed: initial seed for random and torch
        device: running device
        fp16: running with fp16
        no_progress_bar: do not show progress bar
        pb_interval: show progress bar with an interval
    """

    def __init__(self, configs=None, profiling_window: 'int'=0, debug:
        'bool'=False, no_warning: 'bool'=False, seed: 'int'=0, device:
        'str'=None, fp16: 'bool'=False, no_progress_bar: 'bool'=False,
        pb_interval: 'int'=1, custom_libs: 'str'=None):
        self.profiling_window = profiling_window
        self.configs = configs
        self.debug = debug
        self.no_warning = no_warning
        self.seed = seed
        self.fp16 = fp16
        self.no_progress_bar = no_progress_bar
        self.pb_interval = pb_interval
        self.distributed_world = 1
        self.rank = 0
        self.local_rank = 0
        if device is None:
            if torch.cuda.is_available():
                self.device = 'cuda'
            else:
                self.device = 'cpu'
        else:
            self.device = device
        if self.device == 'cuda':
            self._init_cuda()
        self._init_log()
        self._init_seed()
        self._import_custom_lib(custom_libs)

    def _init_log(self):
        FORMAT = (
            f"%(asctime)s | %(levelname)s | %(name)s |{f' RANK {self.rank} | ' if not self.is_master() else ' '}%(message)s"
            )
        logging.basicConfig(format=FORMAT, datefmt='%Y-%m-%d,%H:%M:%S',
            level=logging.INFO)
        if not self.is_master():
            logging.disable(logging.INFO)

    def _import_custom_lib(self, path):
        """
        Import library manually

        Args:
            path: external libraries split with `,`
        """
        if path:
            import importlib
            logger = logging.getLogger(__name__)
            path = path.strip('\n')
            for line in path.split(','):
                logger.info(f'import module from {line}')
                line = line.replace('/', '.')
                importlib.import_module(line)

    def _init_cuda(self):
        """
        Initialize cuda device

        We assume that the user will not run ParaGen on more than one worker
        with only 1 GPU used on each worker.
        """
        if torch.cuda.device_count() > 1:
            import horovod.torch as hvd  # assumed dependency; referenced without an import upstream
            hvd.init()
            torch.cuda.set_device(hvd.local_rank())
            self.rank = hvd.rank()
            self.local_rank = hvd.local_rank()
            self.distributed_world = hvd.size()
        torch.cuda.empty_cache()

    def _init_seed(self):
        """
        Initialize global seed
        """
        random.seed(self.seed)
        torch.manual_seed(self.seed)
        if self.device == 'cuda':
            torch.cuda.manual_seed(self.seed)  # presumed intent; upstream repeated torch.manual_seed here

    def is_master(self):
        """ check whether the current process is the master process """
        return self.rank == 0


class LinearClassifierNew(nn.Module):
    """
    Classifier with only a linear projection.

    Args:
        d_model: feature dimensionality
        labels: number of classes
        invalid_classes (List): classes that are not allowed to be produced
    """

    def __init__(self, d_model, labels, invalid_classes: 'List'=None):
        super().__init__()
        self._linear = nn.Linear(d_model, labels, bias=False)
        self._invalid_class_mask = get_invalid_class_mask(labels,
            invalid_classes) if invalid_classes else None

    def forward(self, input_0):
        # Note: the compiled graph was traced without invalid-class masking,
        # so self._invalid_class_mask is not applied on this path.
        primals_1 = self._linear.weight
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
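# A hedged parity sketch (added for illustration; assumes a CUDA device and
# that the eager LinearClassifier from the previous listing is in scope).
# Weights must be shared for the comparison, and invalid_classes is left
# unset because the compiled forward never applies the mask.
def _check_linear_classifier_compiled_parity():
    if not torch.cuda.is_available():
        return
    eager = LinearClassifier(d_model=4, labels=4).cuda()
    compiled = LinearClassifierNew(d_model=4, labels=4).cuda()
    compiled._linear.load_state_dict(eager._linear.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x), compiled(x), atol=1e-05)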
godweiyang/ParaGen
LinearClassifier
false
15,450
[ "Apache-2.0" ]
50
9665d1244ea38a41fc06b4e0a7f6411985e2221f
https://github.com/godweiyang/ParaGen/tree/9665d1244ea38a41fc06b4e0a7f6411985e2221f