Dataset schema (column name, type, observed value range):

    entry_point           string   length 1 - 65
    original_triton_code  string   length 4.5k - 619k
    python_code           string   length 208 - 60.9k
    triton_code           string   length 1.15k - 275k
    repo_name             string   length 7 - 115
    module_name           string   length 1 - 65
    synthetic             bool     1 class
    uuid                  int64    0 - 18.5k
    licenses              list     length 1 - 6
    stars                 int64    0 - 19.8k
    sha                   string   length 40 (fixed)
    repo_link             string   length 72 - 180
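For orientation, a minimal sketch of how one record with this schema might be consumed. The JSON Lines file name and field-access pattern are illustrative assumptions, not part of the dataset itself.

import json

# Hypothetical reader: assumes the records are stored as JSON Lines with
# the twelve columns listed in the schema above (the file name is made up).
with open('records.jsonl') as f:
    for line in f:
        record = json.loads(line)
        # python_code holds the original PyTorch module; triton_code holds
        # the cleaned-up TorchInductor output defining call() and a *New class.
        print(record['entry_point'], record['repo_name'], record['stars'])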
entry_point: FixedSubnetConv

original_triton_code:
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_1/inductor_cache/dv/cdvmujdx4wzp3xf3q2rdnt72xso7clbkadprbhlirztwjsalq3rx.py
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.mul]
# Source node to ATen node mapping:
#   w => mul
# Graph fragment:
#   %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = tl.load(in_ptr1 + (x0), xmask)
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/vb/cvbno3dccglzmlbisnwicoai3aocrgweun3buh6avsdqdjjhjczh.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   x => convolution
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_4, %mul, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [w], Original ATen: [aten.mul]
        stream0 = get_raw_stream(0)
        triton_poi_fused_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
        buf1 = extern_kernels.convolution(primals_4, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
        buf2 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_1.run(buf2, primals_3, 16, grid=grid(16), stream=stream0)
        del primals_3
    return (buf2, primals_1, primals_2, primals_4, buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import math
import torch
import torch.multiprocessing
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.functional as F


class FixedSubnetConv(nn.Conv2d):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.scores = nn.Parameter(torch.Tensor(self.weight.size()))
        nn.init.kaiming_uniform_(self.scores, a=math.sqrt(5))

    def set_prune_rate(self, prune_rate):
        self.prune_rate = prune_rate

    def set_subnet(self):
        output = self.clamped_scores().clone()
        _, idx = self.clamped_scores().flatten().abs().sort()
        p = int(self.prune_rate * self.clamped_scores().numel())
        flat_oup = output.flatten()
        flat_oup[idx[:p]] = 0
        flat_oup[idx[p:]] = 1
        self.scores = torch.nn.Parameter(output)
        self.scores.requires_grad = False

    def clamped_scores(self):
        return self.scores.abs()

    def get_subnet(self):
        return self.weight * self.scores

    def forward(self, x):
        w = self.get_subnet()
        x = F.conv2d(x, w, self.bias, self.stride, self.padding,
            self.dilation, self.groups)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
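A minimal usage sketch, assuming the module above is in scope; the prune rate of 0.5 is an illustrative choice, not something the record specifies.

import torch

# Hypothetical usage: build the layer from the record's init inputs,
# freeze a 0/1 mask over the weights, and run a forward pass.
args, kwargs = get_init_inputs()
conv = FixedSubnetConv(*args, **kwargs)
conv.set_prune_rate(0.5)   # keep the half of the weights with the largest score magnitudes
conv.set_subnet()          # freeze scores into a fixed binary mask
x, = get_inputs()
y = conv(x)                # convolves with weight * mask
print(y.shape)             # torch.Size([4, 4, 1, 1]): a 4x4 kernel over a 4x4 input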
triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.multiprocessing
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        buf1 = extern_kernels.convolution(primals_4, buf0, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(16)](buf2, primals_3, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_3
    return buf2, primals_1, primals_2, primals_4, buf0


class FixedSubnetConvNew(nn.Conv2d):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.scores = nn.Parameter(torch.Tensor(self.weight.size()))
        nn.init.kaiming_uniform_(self.scores, a=math.sqrt(5))

    def set_prune_rate(self, prune_rate):
        self.prune_rate = prune_rate

    def set_subnet(self):
        output = self.clamped_scores().clone()
        _, idx = self.clamped_scores().flatten().abs().sort()
        p = int(self.prune_rate * self.clamped_scores().numel())
        flat_oup = output.flatten()
        flat_oup[idx[:p]] = 0
        flat_oup[idx[p:]] = 1
        self.scores = torch.nn.Parameter(output)
        self.scores.requires_grad = False

    def clamped_scores(self):
        return self.scores.abs()

    def get_subnet(self):
        return self.weight * self.scores

    def forward(self, input_0):
        primals_1 = self.weight
        primals_3 = self.bias
        primals_2 = self.scores
        primals_4 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
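A hedged sanity-check sketch (requires a CUDA device, and assumes both classes above are importable): the compiled module should match the eager one once they share parameters.

import torch

# Hypothetical check: copy weight/bias/scores into the compiled variant
# and compare outputs; both modules expose the same parameter names.
eager = FixedSubnetConv(4, 4, 4).cuda()
compiled = FixedSubnetConvNew(4, 4, 4).cuda()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(eager(x), compiled(x))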
repo_name: RICE-EIC/Robust_Scratch_Ticket
module_name: FixedSubnetConv
synthetic: false
uuid: 8669
licenses: ["MIT"]
stars: 13
sha: f77b41cdaab6db4922a6d4b5970db75a9bfc7257
repo_link: https://github.com/RICE-EIC/Robust_Scratch_Ticket/tree/f77b41cdaab6db4922a6d4b5970db75a9bfc7257
entry_point: ImpalaResidual

original_triton_code:
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_1/inductor_cache/lo/clo7rh3pipp3v33rgizotrhoubr6civizfoi7rsi2sqn44yprnki.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu]
# Source node to ATen node mapping:
#   out => relu
# Graph fragment:
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), None)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tl.store(out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/yp/cypo2ebzk5ljhjmwzftuhaoutmqxqz4h32z457d5fwuk7mxuomat.py
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   out_1 => convolution
#   out_2 => relu_1
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + (x0), None)
    tmp1 = tl.load(in_ptr0 + (0))
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tl.store(in_out_ptr0 + (x0), tmp5, None)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/g6/cg6gbbak4ykb6rdwzw5dzpksdpufmiqi5wdnxhsp7eoitdjwyzta.py
# Topologically Sorted Source Nodes: [out_3, add], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
#   add => add
#   out_3 => convolution_1
# Graph fragment:
#   %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_1), kwargs = {})
triton_poi_fused_add_convolution_2 = async_compile.triton('triton_poi_fused_add_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + (x0), None)
    tmp1 = tl.load(in_ptr0 + (0))
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr1 + (x0), None)
    tmp3 = tmp0 + tmp2
    tmp5 = tmp3 + tmp4
    tl.store(in_out_ptr0 + (x0), tmp5, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
    assert_size_stride(primals_2, (1, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_3, (1, ), (1, ))
    assert_size_stride(primals_4, (1, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_5, (1, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.float32)
        # Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu]
        stream0 = get_raw_stream(0)
        triton_poi_fused_relu_0.run(primals_1, buf0, 16384, grid=grid(16384), stream=stream0)
        # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 1, 64, 64), (4096, 4096, 64, 1))
        buf2 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 16384, grid=grid(16384), stream=stream0)
        del primals_3
        # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution]
        buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 1, 64, 64), (4096, 4096, 64, 1))
        buf4 = buf3; del buf3  # reuse
        # Topologically Sorted Source Nodes: [out_3, add], Original ATen: [aten.convolution, aten.add]
        triton_poi_fused_add_convolution_2.run(buf4, primals_5, primals_1, 16384, grid=grid(16384), stream=stream0)
        del primals_1
        del primals_5
    return (buf4, primals_2, primals_4, buf0, buf2, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
import torch.nn as nn
import torch.nn.functional as F


class ImpalaResidual(nn.Module):
    """
    A residual block for an IMPALA CNN.
    """

    def __init__(self, depth):
        super().__init__()
        self.conv1 = nn.Conv2d(depth, depth, 3, padding=1)
        self.conv2 = nn.Conv2d(depth, depth, 3, padding=1)

    def forward(self, x):
        out = F.relu(x)
        out = self.conv1(out)
        out = F.relu(out)
        out = self.conv2(out)
        return out + x


def get_inputs():
    return [torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return [[], {'depth': 1}]
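A minimal usage sketch, assuming the module above is in scope: the pre-activation residual block preserves spatial size and channel count, so input and output shapes match.

import torch

# relu -> conv1 -> relu -> conv2, plus the skip connection
block = ImpalaResidual(depth=1)
x = torch.rand(4, 1, 64, 64)
out = block(x)
assert out.shape == x.shape  # 3x3 convs with padding=1 keep 64x64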
triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, None)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tl.store(out_ptr0 + x0, tmp2, None)


@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tl.store(in_out_ptr0 + x0, tmp5, None)


@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, None)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr1 + x0, None)
    tmp3 = tmp0 + tmp2
    tmp5 = tmp3 + tmp4
    tl.store(in_out_ptr0 + x0, tmp5, None)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
    assert_size_stride(primals_2, (1, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_3, (1,), (1,))
    assert_size_stride(primals_4, (1, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_5, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(16384)](primals_1, buf0, 16384,
            XBLOCK=128, num_warps=4, num_stages=1)
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 1, 64, 64), (4096, 4096, 64, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_relu_1[grid(16384)](buf2, primals_3,
            16384, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
        buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 1, 64, 64), (4096, 4096, 64, 1))
        buf4 = buf3
        del buf3
        triton_poi_fused_add_convolution_2[grid(16384)](buf4, primals_5,
            primals_1, 16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        del primals_5
    return buf4, primals_2, primals_4, buf0, buf2


class ImpalaResidualNew(nn.Module):
    """
    A residual block for an IMPALA CNN.
    """

    def __init__(self, depth):
        super().__init__()
        self.conv1 = nn.Conv2d(depth, depth, 3, padding=1)
        self.conv2 = nn.Conv2d(depth, depth, 3, padding=1)

    def forward(self, input_0):
        primals_2 = self.conv1.weight
        primals_3 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
repo_name: PacktPublishing/Hands-On-Reinforcement-Learning-for-Games
module_name: ImpalaResidual
synthetic: false
uuid: 8670
licenses: ["MIT"]
stars: 41
sha: 045b8846f2558aa8fb8ac8cef5c71ee098cb9b22
repo_link: https://github.com/PacktPublishing/Hands-On-Reinforcement-Learning-for-Games/tree/045b8846f2558aa8fb8ac8cef5c71ee098cb9b22
entry_point: distLinear

original_triton_code:
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_1/inductor_cache/gm/cgmu3hqtju4n2ybutawn42cbtgivpwminncmzeooab62ilot5bir.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
#   _weight_norm => pow_3, pow_4, sum_2
# Graph fragment:
#   %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_3, 2), kwargs = {})
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1], True), kwargs = {})
#   %pow_4 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
triton_poi_fused__weight_norm_interface_0 = async_compile.triton('triton_poi_fused__weight_norm_interface_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp1 = tmp0 * tmp0
    tmp3 = tmp2 * tmp2
    tmp4 = tmp1 + tmp3
    tmp6 = tmp5 * tmp5
    tmp7 = tmp4 + tmp6
    tmp9 = tmp8 * tmp8
    tmp10 = tmp7 + tmp9
    tmp11 = libdevice.sqrt(tmp10)
    tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/g7/cg7ft2rl6icxu4ysufidys6qejwq3jtdlwig7h3gbbkptycrwfrb.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
#   _weight_norm => div_1, mul
# Graph fragment:
#   %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %pow_4), kwargs = {})
#   %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %div_1), kwargs = {})
triton_poi_fused__weight_norm_interface_1 = async_compile.triton('triton_poi_fused__weight_norm_interface_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 / tmp2
    tmp4 = tmp0 * tmp3
    tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/lw/clwiyvhkotmmjiqeu7iix2vhsrrf7xlnnqw3el765ylnx7l2g3z6.py
# Topologically Sorted Source Nodes: [add, x_normalized], Original ATen: [aten.add, aten.div]
# Source node to ATen node mapping:
#   add => add
#   x_normalized => div
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, 1e-05), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %add), kwargs = {})
triton_poi_fused_add_div_2 = async_compile.triton('triton_poi_fused_add_div_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = (xindex // 64)
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-05
    tmp14 = tmp12 + tmp13
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/rx/crx7m54f3zf4yi4dz2liyhfdpu3emcybecxg5qemggdgtyba2yvv.py
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.mul]
# Source node to ATen node mapping:
#   scores => mul_1
# Graph fragment:
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 2), kwargs = {})
triton_poi_fused_mul_3 = async_compile.triton('triton_poi_fused_mul_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_3(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
    tmp1 = 2.0
    tmp2 = tmp0 * tmp1
    tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 1), (1, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
        stream0 = get_raw_stream(0)
        triton_poi_fused__weight_norm_interface_0.run(primals_3, buf0, 4, grid=grid(4), stream=stream0)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
        triton_poi_fused__weight_norm_interface_1.run(primals_3, primals_2, buf0, buf1, 16, grid=grid(16), stream=stream0)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [add, x_normalized], Original ATen: [aten.add, aten.div]
        triton_poi_fused_add_div_2.run(primals_1, buf2, 256, grid=grid(256), stream=stream0)
        del primals_1
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [cos_dist], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf3)
        buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3  # reuse
        # Topologically Sorted Source Nodes: [scores], Original ATen: [aten.mul]
        triton_poi_fused_mul_3.run(buf4, 256, grid=grid(256), stream=stream0)
    return (buf4, buf1, primals_2, primals_3, buf0, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
import torch.nn as nn
from torch.nn.utils.weight_norm import WeightNorm


class distLinear(nn.Module):

    def __init__(self, indim, outdim):
        super(distLinear, self).__init__()
        self.L = nn.Linear(indim, outdim, bias=False)
        self.class_wise_learnable_norm = True
        if self.class_wise_learnable_norm:
            WeightNorm.apply(self.L, 'weight', dim=0)
        if outdim <= 200:
            self.scale_factor = 2
        else:
            self.scale_factor = 10

    def forward(self, x):
        x_norm = torch.norm(x, p=2, dim=1).unsqueeze(1).expand_as(x)
        x_normalized = x.div(x_norm + 1e-05)
        if not self.class_wise_learnable_norm:
            L_norm = torch.norm(self.L.weight.data, p=2, dim=1).unsqueeze(1).expand_as(self.L.weight.data)
            self.L.weight.data = self.L.weight.data.div(L_norm + 1e-05)
        cos_dist = self.L(x_normalized)
        scores = self.scale_factor * cos_dist
        return scores


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'indim': 4, 'outdim': 4}]
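A minimal usage sketch, assuming the module above is in scope. The 2-D input is an illustrative choice (the record itself uses a 4-D input); with weight-normed rows of unit norm the scores reduce to scale_factor times the cosine similarity.

import torch

# Cosine-similarity classifier head: inputs are L2-normalized over dim=1,
# then projected onto (weight-normed) class vectors and rescaled.
head = distLinear(indim=4, outdim=4)
x = torch.rand(4, 4)      # a batch of 4 feature vectors
scores = head(x)
print(scores.shape)       # torch.Size([4, 4]): one score per class per sample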
triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn.utils.weight_norm import WeightNorm

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp1 = tmp0 * tmp0
    tmp3 = tmp2 * tmp2
    tmp4 = tmp1 + tmp3
    tmp6 = tmp5 * tmp5
    tmp7 = tmp4 + tmp6
    tmp9 = tmp8 * tmp8
    tmp10 = tmp7 + tmp9
    tmp11 = libdevice.sqrt(tmp10)
    tl.store(out_ptr0 + x0, tmp11, xmask)


@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 / tmp2
    tmp4 = tmp0 * tmp3
    tl.store(out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_add_div_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-05
    tmp14 = tmp12 + tmp13
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x3, tmp15, xmask)


@triton.jit
def triton_poi_fused_mul_3(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = 2.0
    tmp2 = tmp0 * tmp1
    tl.store(in_out_ptr0 + x0, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 1), (1, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__weight_norm_interface_0[grid(4)](primals_3, buf0,
            4, XBLOCK=4, num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused__weight_norm_interface_1[grid(16)](primals_3,
            primals_2, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_div_2[grid(256)](primals_1, buf2, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
            reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf3)
        buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf3
        triton_poi_fused_mul_3[grid(256)](buf4, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
    return buf4, buf1, primals_2, primals_3, buf0, reinterpret_tensor(buf2,
        (64, 4), (4, 1), 0)


class distLinearNew(nn.Module):

    def __init__(self, indim, outdim):
        super(distLinearNew, self).__init__()
        self.L = nn.Linear(indim, outdim, bias=False)
        self.class_wise_learnable_norm = True
        if self.class_wise_learnable_norm:
            WeightNorm.apply(self.L, 'weight', dim=0)
        if outdim <= 200:
            self.scale_factor = 2
        else:
            self.scale_factor = 10

    def forward(self, input_0):
        primals_2 = self.L.weight_g
        primals_3 = self.L.weight_v
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
repo_name: RafLaf/easy
module_name: distLinear
synthetic: false
uuid: 8671
licenses: ["MIT"]
stars: 25
sha: 3e3603aef7dfb1cf469820330d695b93ba76dfd4
repo_link: https://github.com/RafLaf/easy/tree/3e3603aef7dfb1cf469820330d695b93ba76dfd4
entry_point: SelfAttentionLayer2

original_triton_code:
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/2m/c2mkmylxu3vdmqgiibpy6bwbw7aqe2olw2svraqjjcfitwc6dayw.py # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_2, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/57/c57am5fxjlxtumd4udah53kjn6yrsnl46yg37giczde4ocgcuypi.py # Topologically Sorted Source Nodes: [attention, attention_1], Original ATen: [aten._softmax, aten.mean] # Source node to ATen node mapping: # attention => div_1, sum_1 # attention_1 => mean # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%div_1, [0]), kwargs = {}) triton_poi_fused__softmax_mean_1 = async_compile.triton('triton_poi_fused__softmax_mean_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mean_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr0 + (1)) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp6 = 
tl.load(in_ptr0 + (2)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (3)) tmp10 = tl.broadcast_to(tmp9, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (4 + x0), xmask) tmp14 = tl.load(in_ptr0 + (4)) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp16 = tl.load(in_ptr0 + (5)) tmp17 = tl.broadcast_to(tmp16, [XBLOCK]) tmp19 = tl.load(in_ptr0 + (6)) tmp20 = tl.broadcast_to(tmp19, [XBLOCK]) tmp22 = tl.load(in_ptr0 + (7)) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp27 = tl.load(in_ptr0 + (8 + x0), xmask) tmp28 = tl.load(in_ptr0 + (8)) tmp29 = tl.broadcast_to(tmp28, [XBLOCK]) tmp30 = tl.load(in_ptr0 + (9)) tmp31 = tl.broadcast_to(tmp30, [XBLOCK]) tmp33 = tl.load(in_ptr0 + (10)) tmp34 = tl.broadcast_to(tmp33, [XBLOCK]) tmp36 = tl.load(in_ptr0 + (11)) tmp37 = tl.broadcast_to(tmp36, [XBLOCK]) tmp41 = tl.load(in_ptr0 + (12 + x0), xmask) tmp42 = tl.load(in_ptr0 + (12)) tmp43 = tl.broadcast_to(tmp42, [XBLOCK]) tmp44 = tl.load(in_ptr0 + (13)) tmp45 = tl.broadcast_to(tmp44, [XBLOCK]) tmp47 = tl.load(in_ptr0 + (14)) tmp48 = tl.broadcast_to(tmp47, [XBLOCK]) tmp50 = tl.load(in_ptr0 + (15)) tmp51 = tl.broadcast_to(tmp50, [XBLOCK]) tmp5 = tmp2 + tmp4 tmp8 = tmp5 + tmp7 tmp11 = tmp8 + tmp10 tmp12 = tmp0 / tmp11 tmp18 = tmp15 + tmp17 tmp21 = tmp18 + tmp20 tmp24 = tmp21 + tmp23 tmp25 = tmp13 / tmp24 tmp26 = tmp12 + tmp25 tmp32 = tmp29 + tmp31 tmp35 = tmp32 + tmp34 tmp38 = tmp35 + tmp37 tmp39 = tmp27 / tmp38 tmp40 = tmp26 + tmp39 tmp46 = tmp43 + tmp45 tmp49 = tmp46 + tmp48 tmp52 = tmp49 + tmp51 tmp53 = tmp41 / tmp52 tmp54 = tmp40 + tmp53 tmp55 = 4.0 tmp56 = tmp54 / tmp55 tl.store(out_ptr0 + (x0), tmp56, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [q], Original ATen: [aten.mm] extern_kernels.mm(primals_1, primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [k], Original ATen: [aten.mm] extern_kernels.mm(primals_1, primals_3, out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.mm] extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf2, buf3, 16, grid=grid(16), stream=stream0) buf4 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [attention, attention_1], Original ATen: [aten._softmax, aten.mean] triton_poi_fused__softmax_mean_1.run(buf3, buf4, 4, grid=grid(4), stream=stream0) del buf3 buf5 = empty_strided_cuda((1, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf4, (1, 4), (4, 1), 0), primals_1, out=buf5) del buf4 return (reinterpret_tensor(buf5, (4, ), (1, ), 0), buf0, buf2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 
4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
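# A minimal sketch (not part of this record) of the algebra behind
# triton_poi_fused__softmax_0 above: the eager module shown next divides
# q @ k.T by math.sqrt(dim) before the softmax, and for dim = 4 Inductor
# folds that 1/sqrt(4) = 0.5 factor into the kernel as the constant
# tmp15, applied *after* the row max is subtracted. This is valid
# because for c > 0, exp(x*c - max(x*c)) == exp((x - max(x)) * c); the
# tmp1 = 1.0 multiply is a leftover no-op scale from the decomposition.
import math
import torch
import torch.nn.functional as F

dim = 4
raw = torch.randn(4, 4)                    # q @ k.T, before scaling
ref = F.softmax(raw / math.sqrt(dim), dim=1)
c = 1.0 / math.sqrt(dim)                   # == 0.5, the tmp15 constant
num = torch.exp((raw - raw.max(dim=1, keepdim=True).values) * c)
assert torch.allclose(num / num.sum(dim=1, keepdim=True), ref)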
import math import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import * class SelfAttentionLayer2(nn.Module): def __init__(self, dim, da): super(SelfAttentionLayer2, self).__init__() self.dim = dim self.Wq = nn.Parameter(torch.zeros(self.dim, self.dim)) self.Wk = nn.Parameter(torch.zeros(self.dim, self.dim)) nn.init.xavier_uniform_(self.Wq.data, gain=1.414) nn.init.xavier_uniform_(self.Wk.data, gain=1.414) def forward(self, h): assert self.dim == h.shape[1] q = torch.matmul(h, self.Wq) k = torch.matmul(h, self.Wk) e = torch.matmul(q, k.t()) / math.sqrt(self.dim) attention = F.softmax(e, dim=1) attention = attention.mean(dim=0) x = torch.matmul(attention, h) return x def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'dim': 4, 'da': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from torch.utils.data import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr0 + 1) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp6 = tl.load(in_ptr0 + 2) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp9 = tl.load(in_ptr0 + 3) tmp10 = tl.broadcast_to(tmp9, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (4 + x0), xmask) tmp14 = tl.load(in_ptr0 + 4) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp16 = tl.load(in_ptr0 + 5) tmp17 = tl.broadcast_to(tmp16, [XBLOCK]) tmp19 = tl.load(in_ptr0 + 6) tmp20 = tl.broadcast_to(tmp19, [XBLOCK]) tmp22 = tl.load(in_ptr0 + 7) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp27 = tl.load(in_ptr0 + (8 + x0), xmask) tmp28 = tl.load(in_ptr0 + 8) tmp29 = tl.broadcast_to(tmp28, [XBLOCK]) tmp30 = tl.load(in_ptr0 + 9) tmp31 = tl.broadcast_to(tmp30, [XBLOCK]) tmp33 = tl.load(in_ptr0 + 10) tmp34 = tl.broadcast_to(tmp33, [XBLOCK]) tmp36 = tl.load(in_ptr0 + 11) tmp37 = tl.broadcast_to(tmp36, [XBLOCK]) tmp41 = tl.load(in_ptr0 + (12 + x0), xmask) tmp42 = tl.load(in_ptr0 + 12) tmp43 = tl.broadcast_to(tmp42, [XBLOCK]) tmp44 = tl.load(in_ptr0 + 13) tmp45 = tl.broadcast_to(tmp44, [XBLOCK]) tmp47 = tl.load(in_ptr0 + 14) tmp48 = tl.broadcast_to(tmp47, [XBLOCK]) tmp50 = tl.load(in_ptr0 + 15) tmp51 = tl.broadcast_to(tmp50, [XBLOCK]) tmp5 = tmp2 + tmp4 tmp8 = tmp5 + tmp7 tmp11 = tmp8 + tmp10 tmp12 = tmp0 / tmp11 tmp18 = tmp15 + tmp17 tmp21 = tmp18 + tmp20 tmp24 = tmp21 + tmp23 tmp25 = tmp13 / tmp24 tmp26 = tmp12 + tmp25 tmp32 = tmp29 + tmp31 tmp35 = tmp32 + tmp34 tmp38 = tmp35 + tmp37 tmp39 = tmp27 / tmp38 tmp40 = tmp26 + tmp39 tmp46 = tmp43 + tmp45 tmp49 = tmp46 + tmp48 tmp52 = tmp49 + tmp51 tmp53 = tmp41 / tmp52 tmp54 = tmp40 + tmp53 tmp55 = 4.0 tmp56 = tmp54 / tmp55 tl.store(out_ptr0 + x0, tmp56, xmask) def call(args): primals_1, 
primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_3, out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused__softmax_mean_1[grid(4)](buf3, buf4, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf3 buf5 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (1, 4), (4, 1), 0), primals_1, out=buf5) del buf4 return reinterpret_tensor(buf5, (4,), (1,), 0 ), buf0, buf2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), buf1 class SelfAttentionLayer2New(nn.Module): def __init__(self, dim, da): super(SelfAttentionLayer2New, self).__init__() self.dim = dim self.Wq = nn.Parameter(torch.zeros(self.dim, self.dim)) self.Wk = nn.Parameter(torch.zeros(self.dim, self.dim)) nn.init.xavier_uniform_(self.Wq.data, gain=1.414) nn.init.xavier_uniform_(self.Wk.data, gain=1.414) def forward(self, input_0): primals_1 = input_0 primals_2 = self.Wq primals_3 = self.Wk output = call([primals_1, primals_2, primals_3]) return output[0]
RUCAIBox/TG_CRS_Code
SelfAttentionLayer2
false
8,672
[ "Apache-2.0" ]
27
0428a3a069c4d0d4888f2d476dba2cafd7918524
https://github.com/RUCAIBox/TG_CRS_Code/tree/0428a3a069c4d0d4888f2d476dba2cafd7918524
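# A usage sketch for the record above (an assumption, not part of the
# source: it needs a CUDA device, since call() allocates CUDA buffers,
# and it relies on the corrected primals mapping in
# SelfAttentionLayer2New, where primals_1 is the input h that call()
# reuses in the final attention @ h matmul).
import torch

eager = SelfAttentionLayer2(dim=4, da=4).cuda()
compiled = SelfAttentionLayer2New(dim=4, da=4).cuda()
compiled.load_state_dict(eager.state_dict())
h = torch.rand(4, 4, device='cuda')
assert torch.allclose(eager(h), compiled(h), atol=1e-6)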
NoiseLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/f2/cf24cmeulw23di2tkqr2p42z4b2vfmfp7ttdubvvgls4x2ofygit.py # Topologically Sorted Source Nodes: [mul, x], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # mul => mul # x => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %randn), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %mul), kwargs = {}) triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 x0 = xindex % 16 x2 = (xindex // 
64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tl.store(out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [noise], Original ATen: [aten.randn] buf0 = torch.ops.aten.randn.default([4, 1, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, x], Original ATen: [aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_0.run(primals_1, primals_2, buf1, buf2, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 return (buf2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
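# A pure-eager sketch (shapes taken from this record, code not from the
# source) showing that the flat-index decomposition used by
# triton_poi_fused_add_mul_0 reproduces the broadcast
# x + weight.view(1, -1, 1, 1) * noise computed by the module that
# follows: x1 picks the per-channel weight, and (x0 + 16 * x2) indexes
# the single-channel noise plane shared by all channels of a batch item.
import torch

x, w, noise = torch.randn(4, 4, 4, 4), torch.randn(4), torch.randn(4, 1, 4, 4)
ref = x + w.view(1, -1, 1, 1) * noise
out = torch.empty_like(x)
xf, nf, of = x.reshape(-1), noise.reshape(-1), out.reshape(-1)
for i in range(x.numel()):
    x0 = i % 16          # offset inside one 4x4 spatial plane
    x1 = (i // 16) % 4   # channel index -> selects w[x1]
    x2 = i // 64         # batch index; noise has one channel per item
    of[i] = xf[i] + w[x1] * nf[x0 + 16 * x2]
assert torch.allclose(out, ref)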
import torch from torch import nn import torch.nn class NoiseLayer(nn.Module): """adds noise. noise is per pixel (constant over channels) with per-channel weight""" def __init__(self, channels): super().__init__() self.weight = nn.Parameter(torch.zeros(channels)) self.noise = None def forward(self, x, noise=None): if noise is None and self.noise is None: noise = torch.randn(x.size(0), 1, x.size(2), x.size(3), device= x.device, dtype=x.dtype) elif noise is None: noise = self.noise x = x + self.weight.view(1, -1, 1, 1) * noise return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4}]
import torch from torch import device import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten.randn.default([4, 1, 4, 4], dtype=torch. float32, device=device(type='cuda', index=0), pin_memory=False) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf2, buf1 class NoiseLayerNew(nn.Module): """adds noise. noise is per pixel (constant over channels) with per-channel weight""" def __init__(self, channels): super().__init__() self.weight = nn.Parameter(torch.zeros(channels)) self.noise = None def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
Qingyang-Xu/GANInversion_with_ConsecutiveImgs
NoiseLayer
false
8,673
[ "MIT" ]
23
9078a48ec3474dacdd02693b051e3addef1c5697
https://github.com/Qingyang-Xu/GANInversion_with_ConsecutiveImgs/tree/9078a48ec3474dacdd02693b051e3addef1c5697
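# A small eager check for the NoiseLayer record above (weights are
# randomized here because the module initializes them to zero). One
# behavioral caveat worth noting: NoiseLayerNew always draws fresh
# noise inside call(), so it only covers the noise=None path of the
# eager module; the explicit-noise and self.noise paths are not
# compiled into the graph.
import torch

layer = NoiseLayer(channels=4)
with torch.no_grad():
    layer.weight.copy_(torch.randn(4))
x = torch.rand(4, 4, 4, 4)
noise = torch.randn(4, 1, 4, 4)
assert torch.allclose(layer(x, noise),
                      x + layer.weight.view(1, -1, 1, 1) * noise)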
CNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/dj/cdjkhg45ivv5a6ejeml5ldcx4tqxi7pustsuryeeqqe62ewtx2es.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (1600*y1)), tmp0, xmask) ''', device_str='cuda') 
# kernel path: runs/run_shard_1/inductor_cache/od/cod4il4o6fstt7pdxuq2vsgbuijn537bpftop2epnumxmn2hxklq.py # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # relu => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 4096], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 256 xnumel = 3600 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (3600*y3)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (y0 + (64*x2) + (230400*y1)), tmp4, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/r2/cr2osohet6hjgw5l2hwecdhhk5nqreh62chj5cedwyx254d3suov.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_2 = 
async_compile.triton('triton_poi_fused_max_pool2d_with_indices_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = (xindex // 64) % 30 x2 = (xindex // 1920) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (7680*x2)), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (7680*x2)), xmask) tmp3 = tl.load(in_ptr0 + (3840 + x0 + (128*x1) + (7680*x2)), xmask) tmp5 = tl.load(in_ptr0 + (3904 + x0 + (128*x1) + (7680*x2)), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, xmask) tl.store(out_ptr1 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/lo/clobpdny5vwozhcmjllb3l7v4ybo6bwth7a5e4aupowe6kez6rrk.py # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # relu_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math 
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 346112 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/dn/cdnkesmgcnlfdcxbvwgm3as4jkhiy3c4nca2674qaqrov77zj6b6.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_4 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024, 128], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 676 xnumel = 128 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 13 y1 = (yindex // 13) y5 = yindex y4 = (yindex // 169) y6 = yindex % 169 tmp0 = tl.load(in_ptr0 + (x2 + (256*y0) + (6656*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (128 + x2 + (256*y0) + (6656*y1)), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (3328 + x2 + (256*y0) + (6656*y1)), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (3456 + x2 + (256*y0) + (6656*y1)), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1, 1], 1, tl.int8) tmp4 = tl.full([1, 1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2 + (128*y5)), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + (169*x2) + (21632*y4)), tmp16, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/75/c75as7lvhmwjwyvudewxaugtk6dgtcrr6vcxsj5doq44zmbnjkmv.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_3 => relu_2 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (64, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_5, (128, ), (1, )) assert_size_stride(primals_6, (512, 21632), (21632, 1)) assert_size_stride(primals_7, (512, ), (1, )) assert_size_stride(primals_8, (4, 512), (512, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_4, buf0, 8192, 25, grid=grid(8192, 25), stream=stream0) del primals_4 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 64, 60, 60), (230400, 3600, 60, 1)) buf2 = empty_strided_cuda((4, 64, 60, 60), (230400, 1, 3840, 64), torch.float32) # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf1, primals_2, buf2, 256, 3600, grid=grid(256, 3600), stream=stream0) del buf1 del primals_2 buf3 = empty_strided_cuda((4, 64, 30, 30), (57600, 1, 1920, 64), torch.float32) buf4 = empty_strided_cuda((4, 64, 30, 30), (57600, 1, 1920, 64), torch.int8) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_2.run(buf2, buf3, buf4, 230400, grid=grid(230400), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf3, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 128, 26, 26), (86528, 1, 3328, 128)) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_3.run(buf6, primals_5, 346112, grid=grid(346112), stream=stream0) del primals_5 buf7 = empty_strided_cuda((4, 128, 13, 13), (21632, 1, 1664, 128), torch.int8) buf8 = empty_strided_cuda((4, 128, 13, 13), (21632, 169, 13, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: 
[aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_4.run(buf6, buf7, buf8, 676, 128, grid=grid(676, 128), stream=stream0) buf9 = empty_strided_cuda((4, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf8, (4, 21632), (21632, 1), 0), reinterpret_tensor(primals_6, (21632, 512), (1, 21632), 0), out=buf9) buf10 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] triton_poi_fused_relu_5.run(buf10, primals_7, 2048, grid=grid(2048), stream=stream0) del primals_7 buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf10, reinterpret_tensor(primals_8, (512, 4), (1, 512), 0), alpha=1, beta=1, out=buf11) del primals_9 return (buf11, primals_1, primals_3, buf0, buf2, buf3, buf4, buf6, buf7, reinterpret_tensor(buf8, (4, 21632), (21632, 1), 0), buf10, primals_8, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((512, 21632), (21632, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 512), (512, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
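# A sketch (not from the source) of what triton_poi_fused_0 above does:
# it re-lays-out conv2's (128, 64, 5, 5) weight so the input-channel
# axis becomes innermost, i.e. strides (1600, 1, 320, 64) -- the same
# layout torch.channels_last would give -- so the following convolution
# reads contiguous channel vectors.
import torch

w = torch.randn(128, 64, 5, 5)
w_cl = w.to(memory_format=torch.channels_last)
assert w_cl.stride() == (1600, 1, 320, 64)
assert torch.equal(w_cl, w)  # same values, different memory layout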
import torch import torch.nn.functional as F import torch.nn as nn class CNN(nn.Module): def __init__(self, num_classes): super(CNN, self).__init__() self.conv1 = nn.Conv2d(1, 64, 5) self.mp1 = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(64, 128, 5) self.mp2 = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(128 * 13 * 13, 512) self.fc2 = nn.Linear(512, num_classes) def forward(self, x): x = self.mp1(F.relu(self.conv1(x))) x = self.mp2(F.relu(self.conv2(x))) x = x.view(-1, 128 * 13 * 13) x = F.relu(self.fc1(x)) x = self.fc2(x) return x def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {'num_classes': 4}]
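# A quick CPU check (assumes the CNN class above) of the size
# arithmetic the compiled graph bakes in: 64 -> conv 5x5 -> 60 ->
# maxpool 2 -> 30 -> conv 5x5 -> 26 -> maxpool 2 -> 13, so the
# flattened feature vector has 128 * 13 * 13 = 21632 entries, matching
# fc1's input dimension and the (512, 21632) stride assert in call().
import torch
import torch.nn.functional as F

model = CNN(num_classes=4)
x = torch.rand(4, 1, 64, 64)
feats = model.mp2(F.relu(model.conv2(model.mp1(F.relu(model.conv1(x))))))
assert feats.shape == (4, 128, 13, 13)
assert feats.flatten(1).shape[1] == 128 * 13 * 13 == 21632
assert model(x).shape == (4, 4)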
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 256 xnumel = 3600 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 3600 * y3), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (y0 + 64 * x2 + 230400 * y1), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 % 30 x2 = xindex // 1920 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 7680 * x2), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 7680 * x2), xmask) tmp3 = tl.load(in_ptr0 + (3840 + x0 + 128 * x1 + 7680 * x2), xmask) tmp5 = tl.load(in_ptr0 + (3904 + x0 + 128 * x1 + 7680 * x2), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, xmask) tl.store(out_ptr1 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 
676 xnumel = 128 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 13 y1 = yindex // 13 y5 = yindex y4 = yindex // 169 y6 = yindex % 169 tmp0 = tl.load(in_ptr0 + (x2 + 256 * y0 + 6656 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (128 + x2 + 256 * y0 + 6656 * y1), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (3328 + x2 + 256 * y0 + 6656 * y1), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (3456 + x2 + 256 * y0 + 6656 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1, 1], 1, tl.int8) tmp4 = tl.full([1, 1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2 + 128 * y5), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + 169 * x2 + 21632 * y4), tmp16, xmask & ymask) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (64, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (512, 21632), (21632, 1)) assert_size_stride(primals_7, (512,), (1,)) assert_size_stride(primals_8, (4, 512), (512, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(8192, 25)](primals_4, buf0, 8192, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_4 buf1 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 64, 60, 60), (230400, 3600, 60, 1)) buf2 = empty_strided_cuda((4, 64, 60, 60), (230400, 1, 3840, 64), torch.float32) triton_poi_fused_convolution_relu_1[grid(256, 3600)](buf1, primals_2, buf2, 256, 3600, XBLOCK=16, YBLOCK=256, num_warps=8, num_stages=1) del buf1 del primals_2 buf3 = empty_strided_cuda((4, 64, 30, 30), (57600, 1, 1920, 64), torch.float32) buf4 = empty_strided_cuda((4, 64, 30, 30), (57600, 1, 1920, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_2[grid(230400)](buf2, buf3, buf4, 230400, XBLOCK=512, num_warps=8, num_stages=1) buf5 = extern_kernels.convolution(buf3, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 128, 26, 26), (86528, 1, 3328, 128)) buf6 = buf5 del buf5 triton_poi_fused_convolution_relu_3[grid(346112)](buf6, primals_5, 346112, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf7 = empty_strided_cuda((4, 128, 13, 13), (21632, 1, 1664, 128), torch.int8) buf8 = empty_strided_cuda((4, 128, 13, 13), (21632, 169, 13, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_4[grid(676, 128)](buf6, buf7, buf8, 676, 128, XBLOCK=128, YBLOCK=8, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((4, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf8, (4, 21632), (21632, 1), 0), reinterpret_tensor(primals_6, (21632, 512), (1, 21632), 0), out=buf9) buf10 = buf9 del buf9 triton_poi_fused_relu_5[grid(2048)](buf10, primals_7, 2048, XBLOCK= 128, num_warps=4, num_stages=1) del primals_7 buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, buf10, reinterpret_tensor(primals_8, (512, 4), (1, 512), 0), alpha=1, beta=1, out=buf11) del primals_9 return (buf11, primals_1, primals_3, buf0, buf2, buf3, buf4, buf6, buf7, reinterpret_tensor(buf8, (4, 21632), (21632, 1), 0), buf10, primals_8, primals_6) class CNNNew(nn.Module): def __init__(self, num_classes): super(CNNNew, self).__init__() self.conv1 = nn.Conv2d(1, 64, 5) self.mp1 = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(64, 128, 5) self.mp2 = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(128 * 13 * 13, 512) self.fc2 = nn.Linear(512, num_classes) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
Psarpei/Handwritten-Text-Recognition
CNN
false
8,674
[ "MIT" ]
15
be8f12092e385f3e117ae79b08fb06d0681f67e3
https://github.com/Psarpei/Handwritten-Text-Recognition/tree/be8f12092e385f3e117ae79b08fb06d0681f67e3
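# A usage sketch for the CNN record above (an assumption, not part of
# the source: it needs a CUDA device, and the fused kernels may differ
# from eager by small float error, hence the tolerance).
import torch

eager = CNN(num_classes=4).cuda()
compiled = CNNNew(num_classes=4).cuda()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 1, 64, 64, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x), compiled(x), atol=1e-4)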
SelfAttentionLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/fo/cfoh3s3ia4ck5avvrnsmmwskgjkvbl2oxg7ej2ft7yp44x456oxw.py # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] # Source node to ATen node mapping: # tanh => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%mm,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tl.store(in_out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/of/cofdihcfzkv547snwzgvqvyiigpy7uwenhhhil3aocevammieixm.py # Topologically Sorted 
Source Nodes: [attention], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze, [0], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.max2(tmp1, 1)[:, None] tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp9 = tmp5 / tmp8 tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp9, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm] extern_kernels.mm(primals_1, primals_2, out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source 
Nodes: [tanh], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm] extern_kernels.mm(buf1, primals_3, out=buf2) buf5 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] triton_per_fused__softmax_1.run(buf2, buf5, 1, 4, grid=grid(1), stream=stream0) buf6 = empty_strided_cuda((1, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf5, (1, 4), (0, 1), 0), primals_1, out=buf6) del buf5 return (reinterpret_tensor(buf6, (4, ), (1, ), 0), buf1, buf2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(primals_3, (1, 4), (1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
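# A reference sketch (not from the source) of what the persistent
# reduction triton_per_fused__softmax_1 computes: since rnumel = 4, a
# single program holds the whole score vector and performs a
# numerically stable softmax in one pass -- max, subtract, exp, sum,
# divide (tmp3 through tmp9 above).
import torch

e = torch.randn(4)
num = (e - e.max()).exp()   # amax + subtract + exp
out = num / num.sum()       # reduce-sum + divide
assert torch.allclose(out, torch.softmax(e, dim=0))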
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import *


class SelfAttentionLayer(nn.Module):

    def __init__(self, dim, da, alpha=0.2, dropout=0.5):
        super(SelfAttentionLayer, self).__init__()
        self.dim = dim
        self.da = da
        self.alpha = alpha
        self.dropout = dropout
        self.a = nn.Parameter(torch.zeros(size=(self.dim, self.da)))
        self.b = nn.Parameter(torch.zeros(size=(self.da, 1)))
        nn.init.xavier_uniform_(self.a.data, gain=1.414)
        nn.init.xavier_uniform_(self.b.data, gain=1.414)

    def forward(self, h):
        assert self.dim == h.shape[1]
        e = torch.matmul(torch.tanh(torch.matmul(h, self.a)), self.b).squeeze(
            dim=1)
        # Explicit dim=0 matches the compiled kernel's reduction axis and
        # avoids the implicit-dim softmax deprecation warning.
        attention = F.softmax(e, dim=0)
        return torch.matmul(attention, h)


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'dim': 4, 'da': 4}]
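A minimal usage sketch for the eager module above, assuming the class definition is in scope; the shapes mirror get_inputs()/get_init_inputs(), and no GPU is needed for this path:

import torch

layer = SelfAttentionLayer(dim=4, da=4)
h = torch.rand(4, 4)      # 4 items, each with dim=4 features
pooled = layer(h)         # attention-weighted sum over the 4 items
print(pooled.shape)       # torch.Size([4])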
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn from torch.utils.data import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(in_out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.max2(tmp1, 1)[:, None] tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tmp9 = tmp5 / tmp8 tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_2, out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf1, primals_3, out=buf2) buf5 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused__softmax_1[grid(1)](buf2, buf5, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) buf6 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (1, 4), (0, 1), 0), primals_1, out=buf6) del buf5 return reinterpret_tensor(buf6, (4,), (1,), 0 ), buf1, buf2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_3, (1, 4), (1, 1), 0) class SelfAttentionLayerNew(nn.Module): def __init__(self, dim, da, alpha=0.2, dropout=0.5): super(SelfAttentionLayerNew, self).__init__() self.dim = dim self.da = da self.alpha = alpha self.dropout = dropout self.a = nn.Parameter(torch.zeros(size=(self.dim, self.da))) self.b = nn.Parameter(torch.zeros(size=(self.da, 1))) nn.init.xavier_uniform_(self.a.data, gain=1.414) nn.init.xavier_uniform_(self.b.data, gain=1.414) def forward(self, input_0): primals_1 = self.a primals_3 = self.b primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
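A hedged parity check between the eager and compiled modules (sketch only: it assumes a CUDA device, since call() pins all buffers to cuda:0, and it copies the parameters across so both modules share the same a and b):

import torch

torch.manual_seed(0)
eager = SelfAttentionLayer(dim=4, da=4).cuda()
compiled = SelfAttentionLayerNew(dim=4, da=4).cuda()
compiled.load_state_dict(eager.state_dict())  # share parameters a and b

h = torch.rand(4, 4, device='cuda')
# Both paths compute softmax(tanh(h @ a) @ b) @ h; they should agree
# up to floating-point rounding on the traced (4, 4) shape.
print(torch.allclose(eager(h), compiled(h), atol=1e-6))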
RUCAIBox/TG_CRS_Code
SelfAttentionLayer
false
8,675
[ "Apache-2.0" ]
27
0428a3a069c4d0d4888f2d476dba2cafd7918524
https://github.com/RUCAIBox/TG_CRS_Code/tree/0428a3a069c4d0d4888f2d476dba2cafd7918524
StddevLayer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/3t/c3tr4uj7sasetbjigtjqgfay7jmhop6y6ymif53xyziuxrunqmf7.py # Topologically Sorted Source Nodes: [mean, y_1, pow_1, y_2, add, y_3, mean_2, z], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.cat] # Source node to ATen node mapping: # add => add # mean => mean # mean_2 => mean_2 # pow_1 => pow_1 # y_1 => sub # y_2 => mean_1 # y_3 => pow_2 # z => cat # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [0], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %mean), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [0], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-08), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.5), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_2, [3, 4, 5], True), kwargs = {}) # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %view_1], 1), kwargs = {}) triton_per_fused_add_cat_mean_pow_sub_0 = async_compile.triton('triton_per_fused_add_cat_mean_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_cat_mean_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_cat_mean_pow_sub_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-08 tmp22 = tmp20 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp27 = 64.0 tmp28 = tmp26 / tmp27 tl.store(out_ptr1 + (tl.broadcast_to(r1 + (80*r2), [XBLOCK, RBLOCK])), tmp28, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/uw/cuwbof5ypfifmq2ruxcadtdzir6c2cgmgwoo2zjn7d3qw5rccuaf.py # Topologically Sorted Source Nodes: [z], Original ATen: [aten.cat] # Source node to ATen node mapping: # z => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %view_1], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x2), xmask) tl.store(out_ptr0 + (x0 + (80*x1)), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64) # alias # Topologically Sorted Source Nodes: [mean, y_1, pow_1, y_2, add, y_3, mean_2, z], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.cat] stream0 = get_raw_stream(0) triton_per_fused_add_cat_mean_pow_sub_0.run(arg0_1, buf2, 1, 64, grid=grid(1), stream=stream0) buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0) # alias # Topologically Sorted Source Nodes: [z], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(arg0_1, buf1, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.nn


class StddevLayer(nn.Module):

    def __init__(self, group_size=4, num_new_features=1):
        super().__init__()
        # Store the constructor arguments instead of hardcoding 4 and 1,
        # which silently ignored the parameters.
        self.group_size = group_size
        self.num_new_features = num_new_features

    def forward(self, x):
        b, c, h, w = x.shape
        group_size = min(self.group_size, b)
        y = x.reshape([group_size, -1, self.num_new_features,
            c // self.num_new_features, h, w])
        y = y - y.mean(0, keepdim=True)
        y = (y ** 2).mean(0, keepdim=True)
        y = (y + 1e-08) ** 0.5
        y = y.mean([3, 4, 5], keepdim=True).squeeze(3)
        y = y.expand(group_size, -1, -1, h, w).clone().reshape(b, self.
            num_new_features, h, w)
        z = torch.cat([x, y], dim=1)
        return z


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
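A small sketch of what the layer computes: it appends num_new_features minibatch-stddev channels (the StyleGAN-style statistic) to the input, so a (4, 4, 4, 4) batch becomes (4, 5, 4, 4), and the appended channel is constant across the group:

import torch

layer = StddevLayer()
x = torch.rand(4, 4, 4, 4)
z = layer(x)
print(z.shape)                           # torch.Size([4, 5, 4, 4])
# The new channel holds one group-wide statistic, replicated everywhere:
print(torch.allclose(z[0, 4], z[1, 4]))  # True when group_size >= batch size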
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_cat_mean_pow_sub_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-08 tmp22 = tmp20 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp27 = 64.0 tmp28 = tmp26 / tmp27 tl.store(out_ptr1 + tl.broadcast_to(r1 + 80 * r2, [XBLOCK, RBLOCK]), tmp28, None) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64) get_raw_stream(0) triton_per_fused_add_cat_mean_pow_sub_0[grid(1)](arg0_1, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0) triton_poi_fused_cat_1[grid(256)](arg0_1, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf3, class StddevLayerNew(nn.Module): def __init__(self, group_size=4, num_new_features=1): super().__init__() self.group_size = 4 self.num_new_features = 1 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
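One detail worth noting in the compiled call() above: rather than materializing the stddev channel and then concatenating, Inductor allocates the final (4, 5, 4, 4) buffer once and has both kernels write into non-overlapping strided views of it. A rough eager-mode analogue of that aliasing pattern (hypothetical names, for illustration only; the fill value is a stand-in, not the layer's exact statistic):

import torch

out = torch.empty(4, 5, 4, 4)      # plays the role of buf3
stddev_view = out[:, 4:5]          # alias of the last channel (like buf2)
input_view = out[:, 0:4]           # alias of the first 4 channels (like buf1)

x = torch.rand(4, 4, 4, 4)
input_view.copy_(x)                # analogue of triton_poi_fused_cat_1
stddev_view.fill_(x.std().item())  # stand-in for the fused stddev kernel
print(out.shape)                   # torch.Size([4, 5, 4, 4])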
Qingyang-Xu/GANInversion_with_ConsecutiveImgs
StddevLayer
false
8,676
[ "MIT" ]
23
9078a48ec3474dacdd02693b051e3addef1c5697
https://github.com/Qingyang-Xu/GANInversion_with_ConsecutiveImgs/tree/9078a48ec3474dacdd02693b051e3addef1c5697
SoftCrossEntropyLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/ft/cftrwlullhwkxnfufrr4t7xseyg5lxqbr7donc6njagypfpftnau.py # Topologically Sorted Source Nodes: [loss, loss_1, loss_2, loss_3], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # loss => amax, sub # loss_1 => amax_1, sub_2 # loss_2 => amax_2, sub_4 # loss_3 => amax_3, sub_6 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) # %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {}) # %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax_1), kwargs = {}) # %amax_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {}) # %sub_4 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax_2), kwargs = {}) # %amax_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {}) # %sub_6 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax_3), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) tl.store(out_ptr1 + (x2), tmp8, xmask) tl.store(out_ptr2 + (x2), tmp8, xmask) tl.store(out_ptr3 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ka/ckarwoj6g5afhnyz3wbhzq4egowxjevrbzpbamjybwm4yabyx2lm.py # Topologically Sorted Source Nodes: [loss, losses_1, loss_1, mul_1, losses_2, loss_2, mul_2, losses_3, loss_3, mul_3, losses_4, losses_5], Original ATen: [aten.nll_loss_forward, aten.add, aten.mul, aten.mean] # Source node to ATen node mapping: # loss => full_default_2, full_default_3, neg, where_1 # loss_1 => full_default_5, full_default_6, neg_1, where_3 # loss_2 => full_default_8, full_default_9, neg_2, where_5 # loss_3 => full_default_11, full_default_12, neg_3, where_7 # losses_1 => mul # losses_2 => add_1 # losses_3 => add_2 # losses_4 => add_3 # losses_5 => mean # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # Graph fragment: # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4], True), kwargs = {dtype: torch.bool, layout: torch.strided, device: cuda:0, pin_memory: False}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze,), kwargs = {}) # %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%full_default_2, %neg, %full_default_3), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, %where_1), kwargs = {}) # %full_default_5 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4], True), kwargs = {dtype: torch.bool, layout: torch.strided, device: cuda:0, pin_memory: False}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze_1,), kwargs = {}) # %full_default_6 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_3 : [num_users=1] = 
call_function[target=torch.ops.aten.where.self](args = (%full_default_5, %neg_1, %full_default_6), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, %where_3), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %full_default_8 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4], True), kwargs = {dtype: torch.bool, layout: torch.strided, device: cuda:0, pin_memory: False}) # %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze_2,), kwargs = {}) # %full_default_9 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_5 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%full_default_8, %neg_2, %full_default_9), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_2, %where_5), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_2), kwargs = {}) # %full_default_11 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4], True), kwargs = {dtype: torch.bool, layout: torch.strided, device: cuda:0, pin_memory: False}) # %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze_3,), kwargs = {}) # %full_default_12 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_7 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%full_default_11, %neg_3, %full_default_12), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_3, %where_7), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_3), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_3,), kwargs = {}) triton_per_fused_add_mean_mul_nll_loss_forward_1 = async_compile.triton('triton_per_fused_add_mean_mul_nll_loss_forward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {6: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=(6,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_nll_loss_forward_1', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': 
True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mean_mul_nll_loss_forward_1(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr2 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp37 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp38 = tl.load(in_ptr3 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr3 + (4*r0), None, eviction_policy='evict_last') tmp41 = tl.load(in_ptr3 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp46 = tl.load(in_ptr3 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp55 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp56 = tl.load(in_ptr4 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp57 = tl.load(in_ptr4 + (4*r0), None, eviction_policy='evict_last') tmp59 = tl.load(in_ptr4 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp62 = tl.load(in_ptr4 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp1 - tmp12 tmp14 = -tmp13 tmp15 = tl.full([1, 1], True, tl.int1) tmp16 = 0.0 tmp17 = tl.where(tmp15, tmp14, tmp16) tmp18 = tmp0 * tmp17 tmp22 = tl_math.exp(tmp21) tmp23 = tl_math.exp(tmp20) tmp24 = tmp22 + tmp23 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tmp31 = tl_math.log(tmp30) tmp32 = tmp20 - tmp31 tmp33 = -tmp32 tmp34 = tl.where(tmp15, tmp33, tmp16) tmp35 = tmp19 * tmp34 tmp36 = tmp18 + tmp35 tmp40 = tl_math.exp(tmp39) tmp42 = tl_math.exp(tmp41) tmp43 = tmp40 + tmp42 tmp44 = tl_math.exp(tmp38) tmp45 = tmp43 + tmp44 tmp47 = tl_math.exp(tmp46) tmp48 = tmp45 + tmp47 tmp49 = tl_math.log(tmp48) tmp50 = tmp38 - tmp49 tmp51 = -tmp50 tmp52 = tl.where(tmp15, tmp51, tmp16) tmp53 = tmp37 * tmp52 tmp54 = tmp36 + tmp53 tmp58 = tl_math.exp(tmp57) tmp60 = tl_math.exp(tmp59) tmp61 = tmp58 + tmp60 tmp63 = tl_math.exp(tmp62) tmp64 = tmp61 + tmp63 tmp65 = tl_math.exp(tmp56) tmp66 = tmp64 + tmp65 tmp67 = tl_math.log(tmp66) tmp68 = tmp56 - tmp67 tmp69 = -tmp68 tmp70 = tl.where(tmp15, tmp69, tmp16) tmp71 = tmp55 * tmp70 tmp72 = tmp54 + tmp71 tmp73 = tl.broadcast_to(tmp72, 
[XBLOCK, RBLOCK]) tmp75 = tl.sum(tmp73, 1)[:, None] tmp76 = 4.0 tmp77 = tmp75 / tmp76 tl.debug_barrier() tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp77, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [loss, loss_1, loss_2, loss_3], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg0_1, buf0, buf1, buf3, buf4, 16, grid=grid(16), stream=stream0) del arg0_1 buf6 = empty_strided_cuda((), (), torch.float32) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [loss, losses_1, loss_1, mul_1, losses_2, loss_2, mul_2, losses_3, loss_3, mul_3, losses_4, losses_5], Original ATen: [aten.nll_loss_forward, aten.add, aten.mul, aten.mean] triton_per_fused_add_mean_mul_nll_loss_forward_1.run(buf7, arg1_1, buf0, buf1, buf3, buf4, 1, 4, grid=grid(1), stream=stream0) del arg1_1 del buf0 del buf1 del buf3 del buf4 return (buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import Tensor from torch.backends import cudnn as cudnn from torch import nn as nn from torch.nn import functional as F from torch.nn import init as init from typing import List class SoftCrossEntropyLoss(nn.Module): """Calculate the CrossEntropyLoss with soft targets. :param weight: Weight to assign to each of the classes. Default: None :type weight: list of float :param reduction: The way to reduce the losses: 'none' | 'mean' | 'sum'. 'none': no reduction, 'mean': the mean of the losses, 'sum': the sum of the losses. :type reduction: str """ def __init__(self, weight: 'List[float]'=None, reduction: 'str'='mean'): super().__init__() if weight is None: self.weight = None else: self.register_buffer('weight', torch.Tensor(weight)) self.reduction = reduction def forward(self, input: 'Tensor', target: 'Tensor') ->Tensor: """Calculate the loss. :param input: prediction logits :param target: target probabilities :return: loss """ n, k = input.shape losses = input.new_zeros(n) for i in range(k): cls_idx = input.new_full((n,), i, dtype=torch.long) loss = F.cross_entropy(input, cls_idx, reduction='none') if self.weight is not None: loss = loss * self.weight[i] losses += target[:, i].float() * loss if self.reduction == 'mean': losses = losses.mean() elif self.reduction == 'sum': losses = losses.sum() elif self.reduction != 'none': raise ValueError(f'Unrecognized reduction: {self.reduction}') return losses def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
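As a quick sanity sketch, with one-hot targets the soft cross-entropy above should coincide with F.cross_entropy on hard labels (assuming reduction='mean' and no class weights):

import torch
import torch.nn.functional as F

criterion = SoftCrossEntropyLoss()
logits = torch.randn(4, 4)
labels = torch.tensor([0, 2, 1, 3])
one_hot = F.one_hot(labels, num_classes=4).float()

soft = criterion(logits, one_hot)       # sum_i target[:, i] * CE(input, i)
hard = F.cross_entropy(logits, labels)  # standard hard-label CE
print(torch.allclose(soft, hard, atol=1e-6))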
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.backends import cudnn as cudnn from torch import nn as nn from torch.nn import init as init from typing import List assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp8, xmask) tl.store(out_ptr2 + x2, tmp8, xmask) tl.store(out_ptr3 + x2, tmp8, xmask) @triton.jit def triton_per_fused_add_mean_mul_nll_loss_forward_1(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp21 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp37 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp38 = tl.load(in_ptr3 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr3 + 4 * r0, None, eviction_policy='evict_last') tmp41 = tl.load(in_ptr3 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp46 = tl.load(in_ptr3 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp55 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp56 = tl.load(in_ptr4 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp57 = tl.load(in_ptr4 + 4 * r0, None, eviction_policy='evict_last') tmp59 = tl.load(in_ptr4 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp62 = tl.load(in_ptr4 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp1 - tmp12 tmp14 = 
-tmp13 tmp15 = tl.full([1, 1], True, tl.int1) tmp16 = 0.0 tmp17 = tl.where(tmp15, tmp14, tmp16) tmp18 = tmp0 * tmp17 tmp22 = tl_math.exp(tmp21) tmp23 = tl_math.exp(tmp20) tmp24 = tmp22 + tmp23 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tmp31 = tl_math.log(tmp30) tmp32 = tmp20 - tmp31 tmp33 = -tmp32 tmp34 = tl.where(tmp15, tmp33, tmp16) tmp35 = tmp19 * tmp34 tmp36 = tmp18 + tmp35 tmp40 = tl_math.exp(tmp39) tmp42 = tl_math.exp(tmp41) tmp43 = tmp40 + tmp42 tmp44 = tl_math.exp(tmp38) tmp45 = tmp43 + tmp44 tmp47 = tl_math.exp(tmp46) tmp48 = tmp45 + tmp47 tmp49 = tl_math.log(tmp48) tmp50 = tmp38 - tmp49 tmp51 = -tmp50 tmp52 = tl.where(tmp15, tmp51, tmp16) tmp53 = tmp37 * tmp52 tmp54 = tmp36 + tmp53 tmp58 = tl_math.exp(tmp57) tmp60 = tl_math.exp(tmp59) tmp61 = tmp58 + tmp60 tmp63 = tl_math.exp(tmp62) tmp64 = tmp61 + tmp63 tmp65 = tl_math.exp(tmp56) tmp66 = tmp64 + tmp65 tmp67 = tl_math.log(tmp66) tmp68 = tmp56 - tmp67 tmp69 = -tmp68 tmp70 = tl.where(tmp15, tmp69, tmp16) tmp71 = tmp55 * tmp70 tmp72 = tmp54 + tmp71 tmp73 = tl.broadcast_to(tmp72, [XBLOCK, RBLOCK]) tmp75 = tl.sum(tmp73, 1)[:, None] tmp76 = 4.0 tmp77 = tmp75 / tmp76 tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp77, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(16)](arg0_1, buf0, buf1, buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 buf6 = empty_strided_cuda((), (), torch.float32) buf7 = buf6 del buf6 triton_per_fused_add_mean_mul_nll_loss_forward_1[grid(1)](buf7, arg1_1, buf0, buf1, buf3, buf4, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 del buf0 del buf1 del buf3 del buf4 return buf7, class SoftCrossEntropyLossNew(nn.Module): """Calculate the CrossEntropyLoss with soft targets. :param weight: Weight to assign to each of the classes. Default: None :type weight: list of float :param reduction: The way to reduce the losses: 'none' | 'mean' | 'sum'. 'none': no reduction, 'mean': the mean of the losses, 'sum': the sum of the losses. :type reduction: str """ def __init__(self, weight: 'List[float]'=None, reduction: 'str'='mean'): super().__init__() if weight is None: self.weight = None else: self.register_buffer('weight', torch.Tensor(weight)) self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
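Because the eager forward loops over the k classes in Python, the traced graph contains four identical max-shifted copies of the logits, so triton_poi_fused__log_softmax_0 writes the same tensor to buf0, buf1, buf3 and buf4; the second kernel then finishes the log-sum-exp. A sketch of that two-pass log-softmax decomposition in eager terms:

import torch
import torch.nn.functional as F

logits = torch.randn(4, 4)
shifted = logits - logits.max(dim=1, keepdim=True).values  # what buf0..buf4 hold
log_probs = shifted - shifted.exp().sum(dim=1, keepdim=True).log()
print(torch.allclose(log_probs, F.log_softmax(logits, dim=1), atol=1e-6))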
PushparajaMurugan/dauphin
SoftCrossEntropyLoss
false
8,677
[ "Apache-2.0" ]
18
4d9832c72288282e6b3d03be1b0ad8708282b005
https://github.com/PushparajaMurugan/dauphin/tree/4d9832c72288282e6b3d03be1b0ad8708282b005
CoralLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/td/ctd6ammldw3l7uemlxudkyz4z2sknl2cmlzlvhzpw2uvsvbxf6tg.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 3) x0 = xindex % 3 x2 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) 
''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (3, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(buf0, primals_3, buf1, 192, grid=grid(192), stream=stream0) del buf0 del primals_3 return (buf1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class CoralLayer(torch.nn.Module): """ Implements CORAL layer described in Cao, Mirjalili, and Raschka (2020) *Rank Consistent Ordinal Regression for Neural Networks with Application to Age Estimation* Pattern Recognition Letters, https://doi.org/10.1016/j.patrec.2020.11.008 Parameters ----------- size_in : int Number of input features for the inputs to the forward method, which are expected to have shape=(num_examples, num_features). num_classes : int Number of classes in the dataset. preinit_bias : bool (default=True) If true, it will pre-initialize the biases to descending values in [0, 1] range instead of initializing it to all zeros. This pre- initialization scheme results in faster learning and better generalization performance in practice. """ def __init__(self, size_in, num_classes, preinit_bias=True): super().__init__() self.size_in, self.size_out = size_in, 1 self.coral_weights = torch.nn.Linear(self.size_in, 1, bias=False) if preinit_bias: self.coral_bias = torch.nn.Parameter(torch.arange(num_classes - 1, 0, -1).float() / (num_classes - 1)) else: self.coral_bias = torch.nn.Parameter(torch.zeros(num_classes - 1).float()) def forward(self, x): """ Computes forward pass. Parameters ----------- x : torch.tensor, shape=(num_examples, num_features) Input features. Returns ----------- logits : torch.tensor, shape=(num_examples, num_classes-1) """ return self.coral_weights(x) + self.coral_bias def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'size_in': 4, 'num_classes': 4}]
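A usage sketch following the docstring and the CORAL paper's decoding rule (logits, then per-threshold probabilities via sigmoid, then the predicted rank by counting thresholds passed); the decoding step is drawn from the paper, not something the layer itself performs:

import torch

layer = CoralLayer(size_in=4, num_classes=4)
x = torch.rand(8, 4)
logits = layer(x)                  # shape (8, 3): num_classes - 1 thresholds
probas = torch.sigmoid(logits)     # P(y > k) for each threshold k
ranks = (probas > 0.5).sum(dim=1)  # predicted ordinal label in {0, ..., 3}
print(logits.shape, ranks.shape)   # torch.Size([8, 3]) torch.Size([8])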
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 3 x0 = xindex % 3 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(192)](buf0, primals_3, buf1, 192, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_3 return buf1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0) class CoralLayerNew(torch.nn.Module): """ Implements CORAL layer described in Cao, Mirjalili, and Raschka (2020) *Rank Consistent Ordinal Regression for Neural Networks with Application to Age Estimation* Pattern Recognition Letters, https://doi.org/10.1016/j.patrec.2020.11.008 Parameters ----------- size_in : int Number of input features for the inputs to the forward method, which are expected to have shape=(num_examples, num_features). num_classes : int Number of classes in the dataset. preinit_bias : bool (default=True) If true, it will pre-initialize the biases to descending values in [0, 1] range instead of initializing it to all zeros. This pre- initialization scheme results in faster learning and better generalization performance in practice. """ def __init__(self, size_in, num_classes, preinit_bias=True): super().__init__() self.size_in, self.size_out = size_in, 1 self.coral_weights = torch.nn.Linear(self.size_in, 1, bias=False) if preinit_bias: self.coral_bias = torch.nn.Parameter(torch.arange(num_classes - 1, 0, -1).float() / (num_classes - 1)) else: self.coral_bias = torch.nn.Parameter(torch.zeros(num_classes - 1).float()) def forward(self, input_0): primals_3 = self.coral_bias primals_1 = self.coral_weights.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
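The compiled path above reduces the whole forward to one (64, 4) x (4, 1) matmul followed by one broadcasted-add kernel. A rough eager equivalent of what call() computes, using made-up parameter values purely for shape illustration:

import torch

weight = torch.rand(1, 4)  # stands in for coral_weights.weight, (1, size_in)
bias = torch.rand(3)       # stands in for coral_bias, (num_classes - 1,)
x = torch.rand(4, 4, 4, 4)

flat = x.reshape(64, 4)                  # the reinterpret_tensor analogue
logits = flat @ weight.t() + bias        # (64, 1) + (3,) broadcasts to (64, 3)
print(logits.reshape(4, 4, 4, 3).shape)  # torch.Size([4, 4, 4, 3])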
Raschka-research-group/coral-pytorch
CoralLayer
false
8,678
[ "MIT" ]
32
6b85e287118476095bac85d6f3dabc6ffb89a326
https://github.com/Raschka-research-group/coral-pytorch/tree/6b85e287118476095bac85d6f3dabc6ffb89a326
AconC
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/i7/ci7jnsodl7phu77auirvlkt6ax4qmbf2anemcnfruwgvpe564wp6.py # Topologically Sorted Source Nodes: [sub], Original ATen: [aten.sub] # Source node to ATen node mapping: # sub => sub # Graph fragment: # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %primals_2), kwargs = {}) triton_poi_fused_sub_0 = async_compile.triton('triton_poi_fused_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = tmp0 - tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_1/inductor_cache/wr/cwrqsdqh7wbszr25dfjlyc5c47k7hmo7zrnnucersu6whmoire6j.py # Topologically Sorted Source Nodes: [dpx, mul_1, sigmoid, mul_2, mul_3, add], Original ATen: [aten.mul, aten.sigmoid, aten.add] # Source node to ATen node mapping: # add => add # dpx => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # sigmoid => sigmoid # Graph fragment: # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_3), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %mul), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul_1,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %sigmoid), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %primals_3), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) triton_poi_fused_add_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x3), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp2 tmp5 = tl.sigmoid(tmp4) tmp6 = tmp2 * tmp5 tmp8 = tmp7 * tmp1 tmp9 = tmp6 + tmp8 tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) 
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [sub], Original ATen: [aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_sub_0.run(primals_1, primals_2, buf0, 4, grid=grid(4), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dpx, mul_1, sigmoid, mul_2, mul_3, add], Original ATen: [aten.mul, aten.sigmoid, aten.add] triton_poi_fused_add_mul_sigmoid_1.run(buf0, primals_3, primals_4, primals_2, buf1, 256, grid=grid(256), stream=stream0) del primals_2 return (buf1, primals_3, primals_4, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class AconC(nn.Module): """ ACON activation (activate or not). AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>. """ def __init__(self, c1): super().__init__() self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) self.beta = nn.Parameter(torch.ones(1, c1, 1, 1)) def forward(self, x): dpx = (self.p1 - self.p2) * x return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'c1': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 - tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x3, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp2 tmp5 = tl.sigmoid(tmp4) tmp6 = tmp2 * tmp5 tmp8 = tmp7 * tmp1 tmp9 = tmp6 + tmp8 tl.store(out_ptr0 + x3, tmp9, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(4)](primals_1, primals_2, buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_1[grid(256)](buf0, primals_3, primals_4, primals_2, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, primals_3, primals_4, buf0 class AconCNew(nn.Module): """ ACON activation (activate or not). AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>. """ def __init__(self, c1): super().__init__() self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) self.beta = nn.Parameter(torch.ones(1, c1, 1, 1)) def forward(self, input_0): primals_1 = self.p1 primals_2 = self.p2 primals_4 = self.beta primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
PoCInnovation/Koic
AconC
false
8,679
[ "MIT" ]
13
eca53b53b7242c1e83213ef9408366ca0a346358
https://github.com/PoCInnovation/Koic/tree/eca53b53b7242c1e83213ef9408366ca0a346358
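Annotation for the record above: Inductor fuses ACON-C into two pointwise Triton kernels, one computing p1 - p2 once per channel and one evaluating dpx * sigmoid(beta * dpx) + p2 * x elementwise. A minimal sketch (an assumption: a CUDA device is available; AconC and AconCNew are the classes defined in this record) checking the compiled module against the eager one:

import torch

eager = AconC(c1=4).cuda()
fused = AconCNew(c1=4).cuda()
fused.load_state_dict(eager.state_dict())  # share p1, p2 and beta

x = torch.rand(4, 4, 4, 4, device='cuda')
# The fused path runs triton_poi_fused_sub_0 and
# triton_poi_fused_add_mul_sigmoid_1 instead of eager elementwise ops.
torch.testing.assert_close(eager(x), fused(x))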
SimpleShortCut
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/hw/chwnxluv75cgqzsnc3u5gyhefcrmnntvnlapjehzslwopxwe7jbq.py # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # pad => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%slice_4, [0, 0, 0, 0, 1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 4) % 6 x0 = xindex % 2 x3 = (xindex // 24) x5 = (xindex // 2) % 12 x6 = xindex tmp0 = (-1) + x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = 
tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + ((-16) + (2*x0) + (8*x5) + (64*x3)), tmp5 & xmask, eviction_policy='evict_last', other=0.0) tl.store(out_ptr0 + (x6), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 2, 2), (24, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(arg0_1, buf0, 96, grid=grid(96), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class SimpleShortCut(nn.Module): def __init__(self, planes): super().__init__() self.planes = planes // 4 def forward(self, x): return F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, self.planes, self. planes), 'constant', 0) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'planes': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 4 % 6 x0 = xindex % 2 x3 = xindex // 24 x5 = xindex // 2 % 12 x6 = xindex tmp0 = -1 + x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-16 + 2 * x0 + 8 * x5 + 64 * x3), tmp5 & xmask, eviction_policy='evict_last', other=0.0) tl.store(out_ptr0 + x6, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 2, 2), (24, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(96)](arg0_1, buf0, 96, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SimpleShortCutNew(nn.Module): def __init__(self, planes): super().__init__() self.planes = planes // 4 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
RaoefTaki/MNTDP-forked
SimpleShortCut
false
8,680
[ "MIT" ]
15
d9ea59a6638f6cdc93eca180ab02672f5bf5d2a1
https://github.com/RaoefTaki/MNTDP-forked/tree/d9ea59a6638f6cdc93eca180ab02672f5bf5d2a1
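Annotation for the record above: the single kernel implements the whole forward pass, a stride-2 spatial subsample followed by zero-padding of planes // 4 channels on each side of the channel axis (the parameter-free shortcut familiar from CIFAR-style ResNets). A minimal eager sketch of the same shape transformation, using the SimpleShortCut class defined in this record:

import torch

x = torch.rand(4, 4, 4, 4)
sc = SimpleShortCut(planes=4)            # pads 4 // 4 = 1 channel per side
y = sc(x)
assert y.shape == (4, 6, 2, 2)           # matches buf0 in call()
assert torch.equal(y[:, 1:5], x[:, :, ::2, ::2])   # middle channels copied
assert torch.all(y[:, 0] == 0) and torch.all(y[:, 5] == 0)  # zero padding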
DoubleDeltaTransform
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/ep/cepcfmep7fsqyimep6o4baxdpzkh4lerieewdrsyomrem3ahq6vp.py # Topologically Sorted Source Nodes: [specgram_1], Original ATen: [aten.replication_pad1d] # Source node to ATen node mapping: # specgram_1 => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view, [None, None, %clamp_max]), kwargs = {}) triton_poi_fused_replication_pad1d_0 = async_compile.triton('triton_poi_fused_replication_pad1d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_replication_pad1d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_replication_pad1d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = tl.load(in_ptr0 + ((4*x1) + ((3) * ((3) <= (((0) * ((0) >= ((-2) + x0)) + ((-2) + 
x0) * (((-2) + x0) > (0))))) + (((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) * ((((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/dt/cdtvvavtco3rdoc4fsockaoo6vmpqbrfkjm7qyy26t7qa6rmosnp.py # Topologically Sorted Source Nodes: [arange, kernel], Original ATen: [aten.arange, aten.repeat] # Source node to ATen node mapping: # arange => add, convert_element_type, iota_1, mul # kernel => repeat # Graph fragment: # %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (5,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_1, 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, -2), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {}) # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%convert_element_type, [64, 1, 1]), kwargs = {}) triton_poi_fused_arange_repeat_1 = async_compile.triton('triton_poi_fused_arange_repeat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_repeat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_arange_repeat_1(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x2 = xindex tmp0 = (-2) + x0 tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + (x2), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/d4/cd4nsigimjbiakg2fbwj3jmlijos4cgwhyl7lz5wvqn6cp6hu6y4.py # Topologically Sorted Source Nodes: [specgram_3], Original ATen: [aten.replication_pad1d] # Source node to ATen node mapping: # specgram_3 => _unsafe_index_1 # Graph fragment: # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_2, [None, None, %clamp_max_1]), kwargs = {}) 
triton_poi_fused_replication_pad1d_2 = async_compile.triton('triton_poi_fused_replication_pad1d_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_replication_pad1d_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_replication_pad1d_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = tl.load(in_ptr0 + ((4*x1) + ((3) * ((3) <= (((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0))))) + (((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) * ((((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tmp1 = 0.1 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/hb/chbztmelnx7synz7kkphpc33fmatx4khxylkrtwrjlgx6y2dwxrx.py # Topologically Sorted Source Nodes: [hstack], Original ATen: [aten.cat] # Source node to ATen node mapping: # hstack => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %view_1, %view_3], 1), kwargs = {}) triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 
'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 12 x0 = xindex % 16 x2 = (xindex // 192) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp9 & xmask, other=0.0) tmp11 = 0.1 tmp12 = tmp10 * tmp11 tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp9, tmp12, tmp13) tmp15 = tmp0 >= tmp7 tmp16 = tl.full([1], 12, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tl.load(in_ptr2 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp15 & xmask, other=0.0) tmp19 = tmp18 * tmp11 tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp15, tmp19, tmp20) tmp22 = tl.where(tmp9, tmp14, tmp21) tmp23 = tl.where(tmp4, tmp5, tmp22) tl.store(out_ptr0 + (x3), tmp23, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 64, 8), (512, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [specgram_1], Original ATen: [aten.replication_pad1d] stream0 = get_raw_stream(0) triton_poi_fused_replication_pad1d_0.run(arg0_1, buf0, 512, grid=grid(512), stream=stream0) buf1 = empty_strided_cuda((64, 1, 5), (5, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [arange, kernel], Original ATen: [aten.arange, aten.repeat] triton_poi_fused_arange_repeat_1.run(buf1, 320, grid=grid(320), stream=stream0) # Topologically Sorted Source Nodes: [specgram_1, arange, kernel, conv1d], Original ATen: [aten.replication_pad1d, aten.arange, aten.repeat, aten.convolution] buf2 = extern_kernels.convolution(buf0, buf1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=64, bias=None) assert_size_stride(buf2, (1, 64, 4), (256, 4, 1)) buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [specgram_3], Original ATen: [aten.replication_pad1d] triton_poi_fused_replication_pad1d_2.run(buf2, buf3, 512, grid=grid(512), stream=stream0) buf4 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [arange_1, kernel_1], Original ATen: [aten.arange, aten.repeat] triton_poi_fused_arange_repeat_1.run(buf4, 320, grid=grid(320), stream=stream0) # Topologically Sorted Source Nodes: [specgram_3, arange_1, kernel_1, conv1d_1], Original ATen: [aten.replication_pad1d, aten.arange, aten.repeat, aten.convolution] buf5 = extern_kernels.convolution(buf3, buf4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=64, bias=None) assert_size_stride(buf5, (1, 64, 4), (256, 4, 1)) del buf3 del buf4 
buf6 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [hstack], Original ATen: [aten.cat] triton_poi_fused_cat_3.run(arg0_1, buf2, buf5, buf6, 768, grid=grid(768), stream=stream0) del arg0_1 del buf2 del buf5 return (buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torchaudio class DoubleDeltaTransform(torch.nn.Module): """A transformation to compute delta and double delta features. Args: win_length (int): The window length to use for computing deltas (Default: 5). mode (str): Mode parameter passed to padding (Default: replicate). """ def __init__(self, win_length: 'int'=5, mode: 'str'='replicate') ->None: super().__init__() self.win_length = win_length self.mode = mode self._delta = torchaudio.transforms.ComputeDeltas(win_length=self. win_length, mode=self.mode) def forward(self, X): """ Args: specgram (Tensor): Tensor of audio of dimension (..., freq, time). Returns: Tensor: specgram, deltas and double deltas of size (..., 3*freq, time). """ delta = self._delta(X) double_delta = self._delta(delta) return torch.hstack((X, delta, double_delta)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torchaudio assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_replication_pad1d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 * x1 + (3 * (3 <= 0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) + (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) * (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0) < 3))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_arange_repeat_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x2 = xindex tmp0 = -2 + x0 tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x2, tmp1, xmask) @triton.jit def triton_poi_fused_replication_pad1d_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 * x1 + (3 * (3 <= 0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) + (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) * (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0) < 3))), xmask, eviction_policy='evict_last') tmp1 = 0.1 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 12 x0 = xindex % 16 x2 = xindex // 192 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp9 & xmask, other=0.0) tmp11 = 0.1 tmp12 = tmp10 * tmp11 tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp9, tmp12, tmp13) tmp15 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp18 = tl.load(in_ptr2 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp15 & xmask, other=0.0) tmp19 = tmp18 * tmp11 tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp15, tmp19, tmp20) tmp22 = tl.where(tmp9, tmp14, tmp21) tmp23 = tl.where(tmp4, tmp5, tmp22) tl.store(out_ptr0 + x3, tmp23, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 64, 8), (512, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_replication_pad1d_0[grid(512)](arg0_1, buf0, 512, XBLOCK=128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((64, 1, 5), (5, 5, 1), torch.float32) triton_poi_fused_arange_repeat_1[grid(320)](buf1, 320, XBLOCK=256, num_warps=4, num_stages=1) buf2 = extern_kernels.convolution(buf0, buf1, stride=(1,), padding= (0,), dilation=(1,), transposed=False, output_padding=(0,), 
groups=64, bias=None) assert_size_stride(buf2, (1, 64, 4), (256, 4, 1)) buf3 = buf0 del buf0 triton_poi_fused_replication_pad1d_2[grid(512)](buf2, buf3, 512, XBLOCK=256, num_warps=4, num_stages=1) buf4 = buf1 del buf1 triton_poi_fused_arange_repeat_1[grid(320)](buf4, 320, XBLOCK=256, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(buf3, buf4, stride=(1,), padding= (0,), dilation=(1,), transposed=False, output_padding=(0,), groups=64, bias=None) assert_size_stride(buf5, (1, 64, 4), (256, 4, 1)) del buf3 del buf4 buf6 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32 ) triton_poi_fused_cat_3[grid(768)](arg0_1, buf2, buf5, buf6, 768, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del buf2 del buf5 return buf6, class DoubleDeltaTransformNew(torch.nn.Module): """A transformation to compute delta and double delta features. Args: win_length (int): The window length to use for computing deltas (Default: 5). mode (str): Mode parameter passed to padding (Default: replicate). """ def __init__(self, win_length: 'int'=5, mode: 'str'='replicate') ->None: super().__init__() self.win_length = win_length self.mode = mode self._delta = torchaudio.transforms.ComputeDeltas(win_length=self. win_length, mode=self.mode) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
RUB-SysSec/WaveFake
DoubleDeltaTransform
false
8,681
[ "MIT" ]
20
d52d51b9ccdb0cec3f484e84b228791f06b955be
https://github.com/RUB-SysSec/WaveFake/tree/d52d51b9ccdb0cec3f484e84b228791f06b955be
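Annotation for the record above: the 0.1 constants in triton_poi_fused_replication_pad1d_2 and triton_poi_fused_cat_3 come from torchaudio's delta formula, which correlates each replication-padded row with the window [-2, -1, 0, 1, 2] and divides by 2 * (1**2 + 2**2) = 10 for win_length=5. A minimal eager sketch reproducing one delta pass (pure PyTorch plus torchaudio, shapes as in this record):

import torch
import torchaudio

x = torch.rand(4, 4, 4, 4)
delta = torchaudio.transforms.ComputeDeltas(win_length=5)(x)

flat = x.reshape(1, -1, 4)               # (1, 64, time), as in buf0
padded = torch.nn.functional.pad(flat, (2, 2), mode='replicate')  # (1, 64, 8)
kernel = torch.arange(-2.0, 3.0)         # the arange/repeat kernel above
manual = 0.1 * sum(kernel[i] * padded[..., i:i + 4] for i in range(5))
torch.testing.assert_close(delta, manual.reshape(x.shape))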
Conv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/gq/cgqljn6fofmvlx76f2mtdyu4z5wzozqgecbne3i6b2ynjwjyztif.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # x => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [2, 1, 2, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 7) % 7 x0 = xindex % 7 x2 = (xindex // 49) x4 = xindex tmp0 = (-2) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 
4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-2) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + ((-10) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/7r/c7r2u57hr54idc3of6lw2ouxuoyy44tzonl7cy4k7awnnjece2kt.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_1 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 784, grid=grid(784), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 return (buf2, primals_2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.utils.data import torch import torch.nn as nn import torch.nn.init as init import torch.nn.functional as F def get_causal_padding(kernel_size, strides, dilation_rate, n_dims=2): p_ = [] for i in range(n_dims - 1, -1, -1): if strides[i] > 1 and dilation_rate[i] > 1: raise ValueError("can't have the stride and dilation over 1") p = (kernel_size[i] - strides[i]) * dilation_rate[i] p_ += p, 0 return p_ def get_same_padding(kernel_size, strides, dilation_rate, n_dims=2): p_ = [] for i in range(n_dims - 1, -1, -1): if strides[i] > 1 and dilation_rate[i] > 1: raise ValueError("Can't have the stride and dilation rate over 1") p = (kernel_size[i] - strides[i]) * dilation_rate[i] if p % 2 == 0: p = p // 2, p // 2 else: p = int(np.ceil(p / 2)), int(np.floor(p / 2)) p_ += p return tuple(p_) def get_valid_padding(n_dims=2): p_ = (0,) * 2 * n_dims return p_ class Conv2d(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding='same', dilation=1, *args, **kwargs): if isinstance(kernel_size, int): kernel_size = (kernel_size,) * 2 if isinstance(stride, int): stride = (stride,) * 2 if isinstance(dilation, int): dilation = (dilation,) * 2 self.stride = stride self.padding_str = padding.upper() if self.padding_str == 'SAME': self.pad_values = get_same_padding(kernel_size, stride, dilation) elif self.padding_str == 'VALID': self.pad_values = get_valid_padding() elif self.padding_str == 'CAUSAL': self.pad_values = get_causal_padding(kernel_size, stride, dilation) else: raise ValueError self.condition = np.sum(self.pad_values) != 0 super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, *args, **kwargs) def reset_parameters(self) ->None: init.xavier_uniform_(self.weight) if self.bias is not None: init.zeros_(self.bias) def forward(self, x): if self.condition: x = F.pad(x, self.pad_values) x = super(Conv2d, self).forward(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.utils.data import torch import torch.nn as nn import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 7 % 7 x0 = xindex % 7 x2 = xindex // 49 x4 = xindex tmp0 = -2 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -2 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-10 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(784)](primals_1, buf0, 784, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0 def get_causal_padding(kernel_size, strides, dilation_rate, n_dims=2): p_ = [] for i in range(n_dims - 1, -1, -1): if strides[i] > 1 and dilation_rate[i] > 1: raise ValueError("can't have the stride and dilation over 1") p = (kernel_size[i] - strides[i]) * dilation_rate[i] p_ += p, 0 return p_ def get_same_padding(kernel_size, strides, dilation_rate, n_dims=2): p_ = [] for i in range(n_dims - 1, -1, -1): if strides[i] > 1 and dilation_rate[i] > 1: raise ValueError("Can't have the stride and dilation rate over 1") p = (kernel_size[i] - strides[i]) * dilation_rate[i] if p % 2 == 0: p = p // 2, p // 2 else: p = int(np.ceil(p / 2)), int(np.floor(p / 2)) p_ += p return tuple(p_) def get_valid_padding(n_dims=2): p_ = (0,) * 2 * n_dims return p_ class Conv2dNew(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding='same', dilation=1, *args, **kwargs): if isinstance(kernel_size, int): kernel_size = (kernel_size,) * 2 if isinstance(stride, int): stride = (stride,) * 2 if isinstance(dilation, int): dilation = (dilation,) * 2 self.stride = stride self.padding_str = padding.upper() if self.padding_str 
== 'SAME': self.pad_values = get_same_padding(kernel_size, stride, dilation) elif self.padding_str == 'VALID': self.pad_values = get_valid_padding() elif self.padding_str == 'CAUSAL': self.pad_values = get_causal_padding(kernel_size, stride, dilation) else: raise ValueError self.condition = np.sum(self.pad_values) != 0 super(Conv2dNew, self).__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, *args, **kwargs) def reset_parameters(self) ->None: init.xavier_uniform_(self.weight) if self.bias is not None: init.zeros_(self.bias) def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Rayhane-mamah/Efficient-VDVAE
Conv2d
false
8,682
[ "MIT" ]
41
07bcb8ba58c228ab0ed62c5cf374c19a10932010
https://github.com/Rayhane-mamah/Efficient-VDVAE/tree/07bcb8ba58c228ab0ed62c5cf374c19a10932010
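Annotation for the record above: the [2, 1, 2, 1] argument of the constant_pad_nd node is get_same_padding evaluated for this record's configuration, p = (kernel_size - stride) * dilation = 3 per spatial dim, split as ceil(3/2) = 2 before and floor(3/2) = 1 after. A minimal sketch using the helpers defined in this record:

pad = get_same_padding(kernel_size=(4, 4), strides=(1, 1), dilation_rate=(1, 1))
assert pad == (2, 1, 2, 1)
# Hence the 4x4 input grows to the 7x7 buf0, and the VALID 4x4 convolution
# brings it back to 4x4: 7 - 4 + 1 == 4.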
MyLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/bz/cbzoc7dzbmzz72kezsf3tnrhq6ixl5um5hyehdq6lmm4rvctfb7m.py # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 1), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/qw/cqw2ahskucrmwkbadqswj76db4kfcxn5w76kji7peia7kwu4zbkq.py # Topologically 
Sorted Source Nodes: [bias], Original ATen: [aten.mul] # Source node to ATen node mapping: # bias => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 1), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0) del primals_2 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [bias], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(primals_1, buf1, 4, grid=grid(4), stream=stream0) del primals_1 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [bias, linear], Original ATen: [aten.mul, aten.addmm] extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', 
dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn import torch.nn.functional as F class MyLinear(nn.Module): """Linear layer with equalized learning rate and custom learning rate multiplier.""" def __init__(self, input_size, output_size, gain=2 ** 0.5, use_wscale= False, lrmul=1, bias=True): super().__init__() he_std = gain * input_size ** -0.5 if use_wscale: init_std = 1.0 / lrmul self.w_mul = he_std * lrmul else: init_std = he_std / lrmul self.w_mul = lrmul self.weight = torch.nn.Parameter(torch.randn(output_size, input_size) * init_std) if bias: self.bias = torch.nn.Parameter(torch.zeros(output_size)) self.b_mul = lrmul else: self.bias = None def forward(self, x): bias = self.bias if bias is not None: bias = bias * self.b_mul return F.linear(x, self.weight * self.w_mul, bias) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_1, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_1 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), ( 4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class MyLinearNew(nn.Module): """Linear layer with equalized learning rate and custom learning rate multiplier.""" def __init__(self, input_size, output_size, gain=2 ** 0.5, use_wscale= False, lrmul=1, bias=True): super().__init__() he_std = gain * input_size ** -0.5 if use_wscale: init_std = 1.0 / lrmul self.w_mul = he_std * lrmul else: init_std = he_std / lrmul self.w_mul = lrmul self.weight = torch.nn.Parameter(torch.randn(output_size, input_size) * init_std) if bias: self.bias = torch.nn.Parameter(torch.zeros(output_size)) self.b_mul = lrmul else: self.bias = None def forward(self, input_0): primals_2 = self.weight primals_1 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Qingyang-Xu/GANInversion_with_ConsecutiveImgs
MyLinear
false
8,683
[ "MIT" ]
23
9078a48ec3474dacdd02693b051e3addef1c5697
https://github.com/Qingyang-Xu/GANInversion_with_ConsecutiveImgs/tree/9078a48ec3474dacdd02693b051e3addef1c5697
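A minimal eager-vs-compiled smoke test for the MyLinear entry above; a sketch, assuming a CUDA device (the generated call() allocates CUDA buffers), that MyLinear, MyLinearNew, get_inputs, and get_init_inputs from the two code fields are in scope, and default constructor arguments (the fused kernels hard-code the default w_mul = b_mul = 1.0).

import torch

torch.manual_seed(0)
init_args, init_kwargs = get_init_inputs()
eager = MyLinear(*init_args, **init_kwargs).cuda()
compiled = MyLinearNew(*init_args, **init_kwargs).cuda()
compiled.load_state_dict(eager.state_dict())  # same weight/bias in both modules
x = get_inputs()[0].cuda()
# Both paths compute F.linear(x, weight * 1.0, bias * 1.0), so outputs should match.
torch.testing.assert_close(compiled(x), eager(x))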
SPoC
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/e7/ce73hyb6fl47lsvuo6oc4nyc7nbjn2cooo36plrte4gsotp7fcxm.py # Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # avg_pool2d => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [4, 4]), kwargs = {}) triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 
(16*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + (x0), tmp32, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class SPoC(nn.Module): def __init__(self): super(SPoC, self).__init__() def forward(self, x): return F.avg_pool2d(x, (x.size(-2), x.size(-1))) def __repr__(self): return self.__class__.__name__ + '()' def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + x0, tmp32, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf0, class SPoCNew(nn.Module): def __init__(self): super(SPoCNew, self).__init__() def __repr__(self): return self.__class__.__name__ + '()' def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
RetrainIt/Perfect-Half-Million-Beauty-Product-Image-Recognition-Challenge
SPoC
false
8,684
[ "Apache-2.0" ]
15
080aa5ae2f2755c6dc10b7cdc910ec0f76bc82c3
https://github.com/RetrainIt/Perfect-Half-Million-Beauty-Product-Image-Recognition-Challenge/tree/080aa5ae2f2755c6dc10b7cdc910ec0f76bc82c3
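Average pooling over the full spatial extent is just a per-channel mean, which is why the fused kernel above sums sixteen loads and scales by 0.0625 = 1/(4*4). A CPU-only sketch of that identity, using nothing beyond torch:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
pooled = F.avg_pool2d(x, (x.size(-2), x.size(-1)))  # (4, 4, 1, 1), as in SPoC.forward
mean = x.mean(dim=(-2, -1), keepdim=True)           # per-channel spatial mean
torch.testing.assert_close(pooled, mean)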
Deconv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/vq/cvq57ejlhwibzq7qbznnnqsheyhuzhcukxp7sni6mbmaf54c33ni.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x => convolution # x_1 => gt, mul, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {}) # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 25) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x3), tmp4, xmask) tl.store(out_ptr1 + (x3), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 5, 5), (100, 25, 5, 1)) buf1 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 400, grid=grid(400), stream=stream0) del buf0 del primals_2 return (buf2, primals_1, primals_3, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Deconv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, bn =False, activation='leakyrelu', dropout=False): super(Deconv2d, self).__init__() padding = int((kernel_size - 1) / 2) self.conv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding=padding) self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0, affine=True) if bn else None self.dropout = nn.Dropout(p=0.5) if dropout else None if activation == 'leakyrelu': self.activation = nn.LeakyReLU(negative_slope=0.2) elif activation == 'relu': self.activation = nn.ReLU() elif activation == 'tanh': self.activation = nn.Tanh() else: raise ValueError('Not a valid activation, received {}'.format( activation)) def forward(self, x): x = self.conv(x) if self.bn is not None: x = self.bn(x) if self.dropout is not None: x = self.dropout(x) x = self.activation(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 25 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 5, 5), (100, 25, 5, 1)) buf1 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(400)](buf0, primals_2, buf1, buf2, 400, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_2 return buf2, primals_1, primals_3, buf1 class Deconv2dNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, bn =False, activation='leakyrelu', dropout=False): super(Deconv2dNew, self).__init__() padding = int((kernel_size - 1) / 2) self.conv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding=padding) self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0, affine=True) if bn else None self.dropout = nn.Dropout(p=0.5) if dropout else None if activation == 'leakyrelu': self.activation = nn.LeakyReLU(negative_slope=0.2) elif activation == 'relu': self.activation = nn.ReLU() elif activation == 'tanh': self.activation = nn.Tanh() else: raise ValueError('Not a valid activation, received {}'.format( activation)) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
RQuispeC/pytorch-ACSCP
Deconv2d
false
8,685
[ "MIT" ]
25
c83f08632012c2245250ff9c5140814461db575c
https://github.com/RQuispeC/pytorch-ACSCP/tree/c83f08632012c2245250ff9c5140814461db575c
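The compiled graph above asserts a (4, 4, 5, 5) buffer after the transposed convolution. That size follows from the ConvTranspose2d output formula H_out = (H_in - 1) * stride - 2 * padding + kernel_size (for dilation 1 and no output padding). A quick sketch, assuming the Deconv2d class from the python_code field is in scope:

import torch

d = Deconv2d(in_channels=4, out_channels=4, kernel_size=4)  # padding = int((4 - 1) / 2) = 1
y = d(torch.rand(4, 4, 4, 4))
assert y.shape == (4, 4, 5, 5)  # (4 - 1) * 1 - 2 * 1 + 4 = 5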
ConstMult
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/jt/cjtbsn6ugcmw5bgb3oil7bdarpcuo5lvtjjsrmrsygj2vxcibit4.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del 
async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_1 return (buf0, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class ConstMult(nn.Module): def __init__(self, alpha=1.0): super().__init__() self.alpha = nn.Parameter(torch.Tensor(1)) nn.init.constant_(self.alpha, alpha) def forward(self, x): return self.alpha * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 return buf0, primals_2 class ConstMultNew(nn.Module): def __init__(self, alpha=1.0): super().__init__() self.alpha = nn.Parameter(torch.Tensor(1)) nn.init.constant_(self.alpha, alpha) def forward(self, input_0): primals_1 = self.alpha primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
RaoefTaki/MNTDP-forked
ConstMult
false
8,686
[ "MIT" ]
15
d9ea59a6638f6cdc93eca180ab02672f5bf5d2a1
https://github.com/RaoefTaki/MNTDP-forked/tree/d9ea59a6638f6cdc93eca180ab02672f5bf5d2a1
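ConstMult is a learnable scalar gate: the fused kernel simply broadcasts the single-element alpha parameter across all 256 elements of the input. An eager-mode sketch, assuming the ConstMult class from the python_code field is in scope:

import torch

m = ConstMult(alpha=2.5)  # alpha is a trainable (1,)-shaped parameter, initialized to 2.5
x = torch.rand(4, 4, 4, 4)
torch.testing.assert_close(m(x), 2.5 * x)
assert m.alpha.requires_grad  # the scale is learned, not a fixed constant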
ncm_output
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/bv/cbv57vdaucxuwf56cnumwvktmhn5ncm5ejimpltj536bncgdiyi6.py # Topologically Sorted Source Nodes: [sub, norm, pow_1, mul, sub_1], Original ATen: [aten.sub, aten.linalg_vector_norm, aten.pow, aten.mul] # Source node to ATen node mapping: # mul => mul # norm => pow_1, pow_2, sum_1 # pow_1 => pow_3 # sub => sub # sub_1 => sub_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %view_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2]), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_2, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_3, -1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_poi_fused_linalg_vector_norm_mul_pow_sub_0 = async_compile.triton('triton_poi_fused_linalg_vector_norm_mul_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_linalg_vector_norm_mul_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 
'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_linalg_vector_norm_mul_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = libdevice.sqrt(tmp18) tmp20 = tmp19 * tmp19 tmp21 = -1.0 tmp22 = tmp20 * tmp21 tmp24 = tmp22 - tmp23 tl.store(out_ptr0 + (x2), tmp24, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, norm, pow_1, mul, sub_1], Original ATen: [aten.sub, aten.linalg_vector_norm, aten.pow, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_linalg_vector_norm_mul_pow_sub_0.run(primals_1, primals_2, primals_3, buf0, 16, grid=grid(16), stream=stream0) del primals_3 return (buf0, primals_1, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class ncm_output(nn.Module):

    def __init__(self, indim, outdim):
        super(ncm_output, self).__init__()
        self.linear = nn.Linear(indim, outdim)

    def forward(self, x):
        # Negative squared distance between each input row and each reshaped
        # weight vector, shifted by the linear bias.
        diff = (x.reshape(x.shape[0], 1, -1) -
                self.linear.weight.transpose(0, 1).reshape(1, -1, x.shape[1]))
        return -torch.norm(diff, dim=2).pow(2) - self.linear.bias


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'indim': 4, 'outdim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_linalg_vector_norm_mul_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = libdevice.sqrt(tmp18) tmp20 = tmp19 * tmp19 tmp21 = -1.0 tmp22 = tmp20 * tmp21 tmp24 = tmp22 - tmp23 tl.store(out_ptr0 + x2, tmp24, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_linalg_vector_norm_mul_pow_sub_0[grid(16)](primals_1, primals_2, primals_3, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 return buf0, primals_1, primals_2 class ncm_outputNew(nn.Module): def __init__(self, indim, outdim): super(ncm_outputNew, self).__init__() self.linear = nn.Linear(indim, outdim) def forward(self, input_0): primals_1 = self.linear.weight primals_3 = self.linear.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
RafLaf/easy
ncm_output
false
8,687
[ "MIT" ]
25
3e3603aef7dfb1cf469820330d695b93ba76dfd4
https://github.com/RafLaf/easy/tree/3e3603aef7dfb1cf469820330d695b93ba76dfd4
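For the square indim == outdim case in this row, the transpose-then-reshape makes each class mean a column of the linear weight, so output[i, j] = -||x_i - W[:, j]||^2 - b_j; the fused kernel's strided loads at x0, 4 + x0, 8 + x0, and 12 + x0 walk exactly such a column. A CPU sketch of that reading, assuming the ncm_output class from the python_code field is in scope:

import torch

torch.manual_seed(0)
m = ncm_output(indim=4, outdim=4)
x = torch.rand(4, 4)
with torch.no_grad():
    out = m(x)
    ref = torch.empty(4, 4)
    for i in range(4):
        for j in range(4):
            # negative squared distance to the j-th weight column, minus the bias
            ref[i, j] = -(x[i] - m.linear.weight[:, j]).pow(2).sum() - m.linear.bias[j]
torch.testing.assert_close(out, ref)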
ValueFunction
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/qs/cqsgbydauqwdluexymrszxq7nqjh3tcbqqiq5ccrsj74pb354tqf.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [value], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 return (reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn class ValueFunction(nn.Module): def __init__(self, width, n_states): super(ValueFunction, self).__init__() self.linear1 = nn.Linear(n_states, width) nn.init.normal_(self.linear1.weight, 0.0, 1 / np.sqrt(n_states)) torch.nn.init.constant_(self.linear1.bias, 0.0) self.linear2 = nn.Linear(width, 1) nn.init.normal_(self.linear2.weight, 0.0, 1 / np.sqrt(width)) torch.nn.init.constant_(self.linear2.bias, 0.0) def forward(self, x): x = torch.tanh(self.linear1(x)) value = self.linear2(x) return value def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'width': 4, 'n_states': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, primals_4 class ValueFunctionNew(nn.Module): def __init__(self, width, n_states): super(ValueFunctionNew, self).__init__() self.linear1 = nn.Linear(n_states, width) nn.init.normal_(self.linear1.weight, 0.0, 1 / np.sqrt(n_states)) torch.nn.init.constant_(self.linear1.bias, 0.0) self.linear2 = nn.Linear(width, 1) nn.init.normal_(self.linear2.weight, 0.0, 1 / np.sqrt(width)) torch.nn.init.constant_(self.linear2.bias, 0.0) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
RajGhugare19/VE-principle-for-model-based-RL
ValueFunction
false
8,688
[ "MIT" ]
16
a9f94dfc9317a0ccc60bc7c558dcec1ebc6d0c63
https://github.com/RajGhugare19/VE-principle-for-model-based-RL/tree/a9f94dfc9317a0ccc60bc7c558dcec1ebc6d0c63
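nn.Linear maps only the trailing dimension, so the (4, 4, 4, 4) input keeps its shape through tanh(linear1) and linear2 reduces the last dimension to 1, matching the (4, 4, 4, 1) reinterpret_tensor at the end of call(). A CPU sketch, assuming the ValueFunction class from the python_code field is in scope:

import torch

v = ValueFunction(width=4, n_states=4)
value = v(torch.rand(4, 4, 4, 4))
assert value.shape == (4, 4, 4, 1)  # only the trailing n_states dimension maps to a scalar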
DotProductAttention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/wz/cwzlgmghy6nxuchbiog4puo46i4tq7yhd3qu6ftkgjf3gwib6hxn.py # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel 
x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/yh/cyhf6bhaqimi2pucos5fnrpvhrt4vuaetbxnooyr5pvgjt7s6fgo.py # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention], Original ATen: [aten.bmm] extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 return (buf3, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class BaseAttention(nn.Module): def __init__(self): super().__init__() def forward(self, *args, **kwargs): raise NotImplementedError class DotProductAttention(BaseAttention): """Dot Product Attention""" def __init__(self, dropout_rate=0.0, **kwargs): """Initialize DotProductAttention Args: dropout_rate (float): attention dropout_rate rate """ super().__init__() self.dropout = nn.Dropout(dropout_rate) def forward(self, q, k, v, attn_mask=None): """Forward Args: q (torch.Tensor): Query matrix, (B, T_q, D_q) k (torch.Tensor): Key matrix, (B, T_k, D_k) v (torch.Tensor): Value matrix, (B, T_v, D_v) T_v = T_k, D_v = D_k attn_mask (torch.BoolTensor | None): Mask tensor. True element will be masked. Returns: output (B, T_q, D_v); attention (B, T_q, T_k) """ attention = torch.bmm(q, k.permute(0, 2, 1)) if attn_mask is not None: attention.masked_fill_(attn_mask, -np.inf) attention = F.softmax(attention, dim=-1) attention = self.dropout(attention) output = attention.bmm(v) return output, attention def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 return buf3, buf2 class BaseAttention(nn.Module): def __init__(self): super().__init__() def forward(self, *args, **kwargs): raise NotImplementedError class DotProductAttentionNew(BaseAttention): """Dot Product Attention""" def __init__(self, dropout_rate=0.0, **kwargs): """Initialize DotProductAttention Args: dropout_rate (float): attention dropout_rate rate """ super().__init__() self.dropout = nn.Dropout(dropout_rate) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
ROBINADC/BiGRU-CRF-with-Attention-for-NER
DotProductAttention
false
8689
[ "MIT" ]
27
b9e037ebd6e1d56500ffb60c6030013982c17ded
https://github.com/ROBINADC/BiGRU-CRF-with-Attention-for-NER/tree/b9e037ebd6e1d56500ffb60c6030013982c17ded
Block
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/24/c24qsgijonbiqjcskkesmr6djddhrqjlc6pskdyvv3cj26t4733k.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : 
tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/wk/cwkmrcckbwmqrnn75bcrj6x53nm4p3l2vitrgxgtbfaftyuxfsme.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/yc/cycpqq6onyduooxckvxtyedrulus22t4a7axbh5v3ohmlynteeib.py # Topologically Sorted Source Nodes: [mul, attn], Original ATen: [aten.mul, aten.clone] # Source node to ATen node mapping: # attn => clone # mul => mul_2 # Graph fragment: # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, 1.0), kwargs = {}) # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_mul_2 = async_compile.triton('triton_poi_fused_clone_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_mul_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_1/inductor_cache/zl/czlqk3zraa2s6rxtxkrdczoils3dsupq3gczxyfy2d7gczk7zfe3.py # Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone] # Source node to ATen node mapping: # attn => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/4k/c4kg4hmh7yhxnm3x743m364fnnrgj4ivvu2jfwdiwfvanbvyd4f2.py # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_1 => amax, exp, sub_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/vm/cvmxc36ua3qvi5sfzotzrqa4y5ilb4k4ptxzsfqpgxth35vrel6k.py # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/dq/cdqntcymln4qdkhe6gvvdsmb76qsnxodwahqvlhhiapm6v3oepxb.py # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul_1 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (8 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_1/inductor_cache/rb/crbg4ppq53vvjejpxahbyutypksm57ramujfio3wyfmhkcvytz5j.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_1 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_9,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/5i/c5i5whyfnlmmdjdjlswewjrqx3jiavt7l2vj26iprli4o4orjnoh.py # Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => var_mean_1 # x_1 => add_2 # x_3 => add_3 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math 
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (1)) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (2)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (3)) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + (x0), tmp28, xmask) tl.store(out_ptr1 + (x0), tmp40, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/fi/cfiqg43urfzw3cnwoqneyfrtrtuyhd4kxnjlp3mhxzbnzqbxt25k.py # Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => add_4, add_5, mul_3, mul_4, rsqrt_1, sub_2 # x_1 => add_2 # x_3 => add_3 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, 
%add_2), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_7), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_8), kwargs = {}) triton_poi_fused_add_native_layer_norm_9 = async_compile.triton('triton_poi_fused_add_native_layer_norm_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/je/cje2ja4gamjci3dk56itxpm3ztwq2mkof6wmwcso4vraw4em4tzf.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.gelu] # Source node to ATen node mapping: # x_5 => add_6, erf, mul_5, mul_6, mul_7 # 
Graph fragment: # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_13, 0.5), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_13, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_6,), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %add_6), kwargs = {}) triton_poi_fused_gelu_10 = async_compile.triton('triton_poi_fused_gelu_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/lb/clbno36riizq6k6f5pkjrxpsw4ixdgtukijbaxlejgrjc3ma5irv.py # Topologically Sorted Source Nodes: [x_1, x_3, x_9], Original ATen: [aten.add] # Source node to ATen node mapping: # x_1 => add_2 # x_3 => add_3 # x_9 => add_7 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %view_15), kwargs = {}) triton_poi_fused_add_11 = async_compile.triton('triton_poi_fused_add_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_out_ptr0 + (x2), xmask) tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tl.store(in_out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (16, 4), (4, 1)) assert_size_stride(primals_10, (16, ), (1, )) assert_size_stride(primals_11, (4, 16), (16, 1)) assert_size_stride(primals_12, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, grid=grid(64), stream=stream0) del primals_1 del primals_2 buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 
4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, attn], Original ATen: [aten.mul, aten.clone] triton_poi_fused_clone_mul_2.run(buf3, buf4, 16, 4, grid=grid(16, 4), stream=stream0) buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf3, buf5, 16, 4, grid=grid(16, 4), stream=stream0) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf6, buf7, 256, grid=grid(256), stream=stream0) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_5.run(buf7, buf8, 256, grid=grid(256), stream=stream0) buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone] triton_poi_fused_clone_6.run(buf3, buf9, 16, 4, grid=grid(16, 4), stream=stream0) del buf3 buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] triton_poi_fused_clone_7.run(buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12) buf13 = buf1; del buf1 # reuse buf14 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_8.run(primals_3, buf12, primals_6, buf13, buf14, 16, grid=grid(16), stream=stream0) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_9.run(primals_3, buf12, primals_6, buf13, buf14, primals_7, primals_8, buf15, 64, grid=grid(64), stream=stream0) del buf13 del buf14 del primals_8 buf16 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_10 buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.gelu] triton_poi_fused_gelu_10.run(buf16, buf17, 256, grid=grid(256), stream=stream0) buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0), 
reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18) buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0); del buf18 # reuse # Topologically Sorted Source Nodes: [x_1, x_3, x_9], Original ATen: [aten.add] triton_poi_fused_add_11.run(buf19, primals_3, buf12, primals_6, primals_12, 64, grid=grid(64), stream=stream0) del primals_12 return (buf19, primals_3, primals_6, primals_7, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
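Among the fused kernels above, triton_poi_fused_gelu_10 implements the exact erf-based GELU rather than the tanh approximation: it evaluates 0.5 * x * (1 + erf(x / sqrt(2))), the constant 0.7071067811865476 being 1/sqrt(2). A plain-PyTorch sketch of the same computation (gelu_erf is a name introduced here):

import math

import torch
import torch.nn.functional as F

def gelu_erf(x: torch.Tensor) -> torch.Tensor:
    # Mirrors the kernel: tmp8 = (x * 0.5) * (erf(x * 0.7071067811865476) + 1.0)
    return 0.5 * x * (1.0 + torch.erf(x * (1.0 / math.sqrt(2.0))))

x = torch.randn(4, 4, 16)
assert torch.allclose(gelu_erf(x), F.gelu(x), atol=1e-6)  # F.gelu defaults to the erf form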
import torch import torch.nn as nn class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = q * self.scale @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn .GELU, norm_layer=nn.LayerNorm): super().__init__() self.norm1 = norm_layer(dim) self.drop_path = DropPath(drop_path ) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) def forward(self, x): x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4, 'num_heads': 4}]
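The Block module above references DropPath, which is neither defined nor imported in this snippet; with the default drop_path=0.0 the nn.Identity() branch is taken and the code runs, but any positive drop_path would raise a NameError. ViT-style codebases commonly import DropPath from timm.models.layers; a minimal self-contained stochastic-depth sketch that would satisfy the reference (an assumption, not the repository's own definition):

import torch
import torch.nn as nn

class DropPath(nn.Module):
    """Per-sample stochastic depth: drop an entire residual branch with
    probability drop_prob, rescaling survivors to preserve the expectation."""

    def __init__(self, drop_prob=0.0):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        if self.drop_prob == 0.0 or not self.training:
            return x
        keep_prob = 1.0 - self.drop_prob
        # One Bernoulli draw per sample, broadcast across all remaining dims.
        shape = (x.shape[0],) + (1,) * (x.ndim - 1)
        mask = x.new_empty(shape).bernoulli_(keep_prob)
        return x * mask / keep_prob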
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_mul_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + 1) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr2 + 2) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr2 + 3) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp40, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = 
tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_out_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tl.store(in_out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (16, 4), (4, 1)) assert_size_stride(primals_10, (16,), (1,)) assert_size_stride(primals_11, (4, 16), (16, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_mul_2[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK =4, YBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(16, 4)](buf3, buf5, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused__softmax_5[grid(256)](buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_6[grid(16, 4)](buf3, buf9, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf3 buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_7[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=16, 
num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0) del buf10 extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12) buf13 = buf1 del buf1 buf14 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_3, buf12, primals_6, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_9[grid(64)](primals_3, buf12, primals_6, buf13, buf14, primals_7, primals_8, buf15, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf13 del buf14 del primals_8 buf16 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0) del buf7 extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_10 buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_gelu_10[grid(256)](buf16, buf17, 256, XBLOCK=256, num_warps=4, num_stages=1) buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0), reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18) buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0) del buf18 triton_poi_fused_add_11[grid(64)](buf19, primals_3, buf12, primals_6, primals_12, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 return buf19, primals_3, primals_6, primals_7, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0 ), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0 ), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0 ), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4 class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = q * self.scale @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class BlockNew(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn .GELU, norm_layer=nn.LayerNorm): super().__init__() self.norm1 = norm_layer(dim) self.drop_path = DropPath(drop_path ) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = 
int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) def forward(self, input_0): primals_1 = self.norm1.weight primals_2 = self.norm1.bias primals_4 = self.attn.qkv.weight primals_5 = self.attn.proj.weight primals_6 = self.attn.proj.bias primals_7 = self.norm2.weight primals_8 = self.norm2.bias primals_9 = self.mlp.fc1.weight primals_10 = self.mlp.fc1.bias primals_11 = self.mlp.fc2.weight primals_12 = self.mlp.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
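For reference (an annotation of mine, not part of the dataset entry): triton_poi_fused_gelu_10 above evaluates the exact erf-based GELU, 0.5 * x * (1 + erf(x / sqrt(2))). The same math in plain PyTorch, checked against torch.nn.functional.gelu:

import torch
import torch.nn.functional as F

def gelu_exact(x: torch.Tensor) -> torch.Tensor:
    # Mirrors triton_poi_fused_gelu_10: 0.5 * x * (1 + erf(x * 1/sqrt(2)))
    return 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))

x = torch.randn(16)
torch.testing.assert_close(gelu_exact(x), F.gelu(x))  # F.gelu defaults to the exact erf formulation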
Pang-Yatian/Point-MAE
Block
false
8690
[ "MIT" ]
42
61727f76e9d0c28babf422505073bd43c2f517bc
https://github.com/Pang-Yatian/Point-MAE/tree/61727f76e9d0c28babf422505073bd43c2f517bc
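A minimal harness of my own (assuming a CUDA device and that an entry's eager class, compiled wrapper class, and sample inputs are importable, and that both classes register identically named submodules) for checking that pairs like Block/BlockNew above agree numerically:

import torch

def check_entry(eager_cls, compiled_cls, inputs, init_args=(), init_kwargs=None):
    # Build both modules, copy the eager weights into the compiled wrapper,
    # and compare outputs on the entry's sample inputs.
    init_kwargs = init_kwargs or {}
    eager = eager_cls(*init_args, **init_kwargs).cuda().eval()
    compiled = compiled_cls(*init_args, **init_kwargs).cuda().eval()
    compiled.load_state_dict(eager.state_dict())
    with torch.no_grad():
        ref = eager(*[t.cuda() for t in inputs])
        out = compiled(*[t.cuda() for t in inputs])
    # Fused kernels reassociate reductions, so use loose fp32 tolerances.
    torch.testing.assert_close(out, ref, rtol=1e-4, atol=1e-4)

For the entry above this would be roughly check_entry(Block, BlockNew, [torch.rand(4, 4, 4)], init_kwargs=dict(dim=4, num_heads=4)); the dim/num_heads values are an assumption, since this entry's get_init_inputs is not shown here.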
ContextAttentionLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/iz/cizel6izm3qkx5lyj557vl577qy64d5c3k6q5zrmzr4hnfoczj2w.py # Topologically Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # tanh => tanh # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %permute_3), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = 
tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = libdevice.tanh(tmp6) tl.store(in_out_ptr0 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/hq/chqo6wzjtpel4egzrzr4xwgxayri6aa22rfqya3f4lqokw6sxt7a.py # Topologically Sorted Source Nodes: [input_6], Original ATen: [aten._softmax] # Source node to ATen node mapping: # input_6 => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = 
tl_math.exp(tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/wp/cwpr2jp7bpqwkbaxgttcl2ekjdtkf6lezuspoiunutzd3j2qgeyk.py # Topologically Sorted Source Nodes: [input_6], Original ATen: [aten._softmax] # Source node to ATen node mapping: # input_6 => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/yv/cyvuloemaqj43mnhdenrcok4u3lpimupy4kn7s7u47a7nn67xd3i.py # Topologically Sorted Source Nodes: [mul, output], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # mul => mul # output => sum_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %unsqueeze), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp8 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp12 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + (x2), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (16, ), (1, )) assert_size_stride(primals_5, (16, 4), (4, 1)) assert_size_stride(primals_6, (16, ), (1, )) assert_size_stride(primals_7, (1, 16), (16, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1) del primals_5 buf2 = reinterpret_tensor(buf0, (4, 4, 16), (64, 16, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_add_tanh_0.run(buf2, primals_4, buf1, primals_6, 256, 
grid=grid(256), stream=stream0) del buf1 del primals_4 del primals_6 buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (16, 16), (16, 1), 0), reinterpret_tensor(primals_7, (16, 1), (1, 16), 0), out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [input_6], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf3, buf4, 16, grid=grid(16), stream=stream0) buf5 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [input_6], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf4, buf5, 16, grid=grid(16), stream=stream0) buf6 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [mul, output], Original ATen: [aten.mul, aten.sum] triton_poi_fused_mul_sum_3.run(primals_1, buf5, buf6, 16, grid=grid(16), stream=stream0) return (buf6, buf5, primals_1, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), buf2, buf5, primals_7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 16), (16, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
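An annotation, not part of the entry: the kernel pair triton_poi_fused__softmax_1 / triton_poi_fused__softmax_2 above is the standard numerically stable softmax split into two passes, which in tensor form is:

import torch

def softmax_two_pass(x: torch.Tensor, dim: int = 1) -> torch.Tensor:
    e = torch.exp(x - x.amax(dim=dim, keepdim=True))  # pass 1: shift by the row max, exponentiate
    return e / e.sum(dim=dim, keepdim=True)           # pass 2: normalize by the row sum

x = torch.randn(4, 4)
torch.testing.assert_close(softmax_two_pass(x), torch.softmax(x, dim=1))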
import torch from collections import OrderedDict import torch.nn as nn class Squeeze(nn.Module): """Squeeze wrapper for nn.Sequential.""" def forward(self, data): return torch.squeeze(data) class Temperature(nn.Module): """Temperature wrapper for nn.Sequential.""" def __init__(self, temperature): super(Temperature, self).__init__() self.temperature = temperature def forward(self, data): return data / self.temperature class ContextAttentionLayer(nn.Module): """ Implements context attention as in the PaccMann paper (Figure 2C) in Molecular Pharmaceutics. With the additional option of having a hidden size in the context. NOTE: In tensorflow, weights were initialized from N(0,0.1). Instead, pytorch uses U(-stddev, stddev) where stddev=1./math.sqrt(weight.size(1)). """ def __init__(self, reference_hidden_size: 'int', reference_sequence_length: 'int', context_hidden_size: 'int', context_sequence_length: 'int'=1, attention_size: 'int'=16, individual_nonlinearity: 'type'=nn.Sequential(), temperature: 'float'=1.0): """Constructor Arguments: reference_hidden_size (int): Hidden size of the reference input over which the attention will be computed (H). reference_sequence_length (int): Sequence length of the reference (T). context_hidden_size (int): This is either simply the amount of features used as context (G) or, if the context is a sequence itself, the hidden size of each time point. context_sequence_length (int): Hidden size in the context, useful if context is also textual data, i.e. coming from nn.Embedding. Defaults to 1. attention_size (int): Hyperparameter of the attention layer, defaults to 16. individual_nonlinearities (type): This is an optional nonlinearity applied to each projection. Defaults to nn.Sequential(), i.e. no nonlinearity. Otherwise it expects a torch.nn activation function, e.g. nn.ReLU(). temperature (float): Temperature parameter to smooth or sharpen the softmax. Defaults to 1. Temperature > 1 flattens the distribution, temperature below 1 makes it spikier. """ super().__init__() self.reference_sequence_length = reference_sequence_length self.reference_hidden_size = reference_hidden_size self.context_sequence_length = context_sequence_length self.context_hidden_size = context_hidden_size self.attention_size = attention_size self.individual_nonlinearity = individual_nonlinearity self.temperature = temperature self.reference_projection = nn.Sequential(OrderedDict([( 'projection', nn.Linear(reference_hidden_size, attention_size)), ('act_fn', individual_nonlinearity)])) self.context_projection = nn.Sequential(OrderedDict([('projection', nn.Linear(context_hidden_size, attention_size)), ('act_fn', individual_nonlinearity)])) if context_sequence_length > 1: self.context_hidden_projection = nn.Sequential(OrderedDict([( 'projection', nn.Linear(context_sequence_length, reference_sequence_length)), ('act_fn', individual_nonlinearity)])) else: self.context_hidden_projection = nn.Sequential() self.alpha_projection = nn.Sequential(OrderedDict([('projection', nn.Linear(attention_size, 1, bias=False)), ('squeeze', Squeeze( )), ('temperature', Temperature(self.temperature)), ('softmax', nn.Softmax(dim=1))])) def forward(self, reference: 'torch.Tensor', context: 'torch.Tensor'): """ Forward pass through a context attention layer Arguments: reference (torch.Tensor): This is the reference input on which attention is computed. Shape: batch_size x ref_seq_length x ref_hidden_size context (torch.Tensor): This is the context used for attention. 
Shape: batch_size x context_seq_length x context_hidden_size Returns: (output, attention_weights): A tuple of two Tensors, first one containing the reference filtered by attention (shape: batch_size x context_hidden_size x 1) and the second one the attention weights (batch_size x context_sequence_length x 1). """ assert len(reference.shape) == 3, 'Reference tensor needs to be 3D' assert len(context.shape) == 3, 'Context tensor needs to be 3D' reference_attention = self.reference_projection(reference) context_attention = self.context_hidden_projection(self. context_projection(context).permute(0, 2, 1)).permute(0, 2, 1) alphas = self.alpha_projection(torch.tanh(reference_attention + context_attention)) output = torch.sum(reference * torch.unsqueeze(alphas, -1), 1) return output, alphas def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'reference_hidden_size': 4, 'reference_sequence_length': 4, 'context_hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from collections import OrderedDict import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = libdevice.tanh(tmp6) tl.store(in_out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp8 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp12 = tl.load(in_ptr1 + (3 + 4 * x1), 
xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (16,), (1,)) assert_size_stride(primals_5, (16, 4), (4, 1)) assert_size_stride(primals_6, (16,), (1,)) assert_size_stride(primals_7, (1, 16), (16, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1) del primals_5 buf2 = reinterpret_tensor(buf0, (4, 4, 16), (64, 16, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_tanh_0[grid(256)](buf2, primals_4, buf1, primals_6, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_4 del primals_6 buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 16), (16, 1), 0), reinterpret_tensor(primals_7, (16, 1), (1, 16), 0), out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0) del buf3 triton_poi_fused__softmax_2[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused_mul_sum_3[grid(16)](primals_1, buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf6, buf5, primals_1, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), buf2, buf5, primals_7 class Squeeze(nn.Module): """Squeeze wrapper for nn.Sequential.""" def forward(self, data): return torch.squeeze(data) class Temperature(nn.Module): """Temperature wrapper for nn.Sequential.""" def __init__(self, temperature): super(Temperature, self).__init__() self.temperature = temperature def forward(self, data): return data / self.temperature class ContextAttentionLayerNew(nn.Module): """ Implements context attention as in the PaccMann paper (Figure 2C) in Molecular Pharmaceutics. With the additional option of having a hidden size in the context. NOTE: In tensorflow, weights were initialized from N(0,0.1). Instead, pytorch uses U(-stddev, stddev) where stddev=1./math.sqrt(weight.size(1)). """ def __init__(self, reference_hidden_size: 'int', reference_sequence_length: 'int', context_hidden_size: 'int', context_sequence_length: 'int'=1, attention_size: 'int'=16, individual_nonlinearity: 'type'=nn.Sequential(), temperature: 'float'=1.0): """Constructor Arguments: reference_hidden_size (int): Hidden size of the reference input over which the attention will be computed (H). reference_sequence_length (int): Sequence length of the reference (T). context_hidden_size (int): This is either simply the amount of features used as context (G) or, if the context is a sequence itself, the hidden size of each time point. context_sequence_length (int): Hidden size in the context, useful if context is also textual data, i.e. coming from nn.Embedding. 
Defaults to 1. attention_size (int): Hyperparameter of the attention layer, defaults to 16. individual_nonlinearities (type): This is an optional nonlinearity applied to each projection. Defaults to nn.Sequential(), i.e. no nonlinearity. Otherwise it expects a torch.nn activation function, e.g. nn.ReLU(). temperature (float): Temperature parameter to smooth or sharpen the softmax. Defaults to 1. Temperature > 1 flattens the distribution, temperature below 1 makes it spikier. """ super().__init__() self.reference_sequence_length = reference_sequence_length self.reference_hidden_size = reference_hidden_size self.context_sequence_length = context_sequence_length self.context_hidden_size = context_hidden_size self.attention_size = attention_size self.individual_nonlinearity = individual_nonlinearity self.temperature = temperature self.reference_projection = nn.Sequential(OrderedDict([( 'projection', nn.Linear(reference_hidden_size, attention_size)), ('act_fn', individual_nonlinearity)])) self.context_projection = nn.Sequential(OrderedDict([('projection', nn.Linear(context_hidden_size, attention_size)), ('act_fn', individual_nonlinearity)])) if context_sequence_length > 1: self.context_hidden_projection = nn.Sequential(OrderedDict([( 'projection', nn.Linear(context_sequence_length, reference_sequence_length)), ('act_fn', individual_nonlinearity)])) else: self.context_hidden_projection = nn.Sequential() self.alpha_projection = nn.Sequential(OrderedDict([('projection', nn.Linear(attention_size, 1, bias=False)), ('squeeze', Squeeze( )), ('temperature', Temperature(self.temperature)), ('softmax', nn.Softmax(dim=1))])) def forward(self, input_0, input_1): primals_3 = self.reference_projection.projection.weight primals_4 = self.reference_projection.projection.bias primals_5 = self.context_projection.projection.weight primals_6 = self.context_projection.projection.bias primals_7 = self.alpha_projection.projection.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
PaccMann/paccmann_predictor
ContextAttentionLayer
false
8691
[ "MIT" ]
19
58071311310c45c1efabb34a4003b96a1c58901a
https://github.com/PaccMann/paccmann_predictor/tree/58071311310c45c1efabb34a4003b96a1c58901a
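A quick numeric check of the Temperature docstring above (my own illustration, not part of the entry): dividing the logits by T > 1 flattens the softmax, while T < 1 sharpens it.

import torch

logits = torch.tensor([1.0, 2.0, 4.0])
for T in (0.5, 1.0, 2.0):
    print(T, torch.softmax(logits / T, dim=0))
# T=0.5 -> ~[0.002, 0.018, 0.980]  (spikier)
# T=1.0 -> ~[0.042, 0.114, 0.844]
# T=2.0 -> ~[0.140, 0.231, 0.629]  (flatter)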
StyleMod
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/mx/cmx6dfvbwwboegeo6cdkzoh6dehxfv227gvzvph6d5f2okoxf6us.py # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ff/cffjfugu73gwgosaw5dogfgr2rkm6o5hkmlbc3clibdbj6tbyhvb.py # 
Topologically Sorted Source Nodes: [add, mul_2, x], Original ATen: [aten.add, aten.mul] # Source node to ATen node mapping: # add => add # mul_2 => mul_2 # x => add_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select, 1.0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %add), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %select_1), kwargs = {}) triton_poi_fused_add_mul_1 = async_compile.triton('triton_poi_fused_add_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 4 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1 + (8*x2)), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (4 + x1 + (8*x2)), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (4 + x1), None, eviction_policy='evict_last') tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tmp1 + tmp4 tmp6 = tmp5 + tmp3 tmp7 = tmp0 * tmp6 tmp10 = tmp9 * tmp3 tmp11 = tmp8 + tmp10 tmp12 = tmp7 + tmp11 tl.store(out_ptr0 + (x3), tmp12, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (8, ), (1, )) assert_size_stride(primals_2, (8, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_2 buf1 = empty_strided_cuda((64, 8), (8, 1), 
torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 8), (1, 4), 0), out=buf1) del buf0 buf2 = empty_strided_cuda((64, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, mul_2, x], Original ATen: [aten.add, aten.mul] triton_poi_fused_add_mul_1.run(primals_4, buf1, primals_1, buf2, 4096, grid=grid(4096), stream=stream0) del buf1 del primals_1 return (buf2, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn import torch.nn.functional as F class MyLinear(nn.Module): """Linear layer with equalized learning rate and custom learning rate multiplier.""" def __init__(self, input_size, output_size, gain=2 ** 0.5, use_wscale= False, lrmul=1, bias=True): super().__init__() he_std = gain * input_size ** -0.5 if use_wscale: init_std = 1.0 / lrmul self.w_mul = he_std * lrmul else: init_std = he_std / lrmul self.w_mul = lrmul self.weight = torch.nn.Parameter(torch.randn(output_size, input_size) * init_std) if bias: self.bias = torch.nn.Parameter(torch.zeros(output_size)) self.b_mul = lrmul else: self.bias = None def forward(self, x): bias = self.bias if bias is not None: bias = bias * self.b_mul return F.linear(x, self.weight * self.w_mul, bias) class StyleMod(nn.Module): def __init__(self, latent_size, channels, use_wscale): super(StyleMod, self).__init__() self.lin = MyLinear(latent_size, channels * 2, gain=1.0, use_wscale =use_wscale) def forward(self, x, latent): style = self.lin(latent) shape = [-1, 2, x.size(1)] + (x.dim() - 2) * [1] style = style.view(shape) x = x * (style[:, 0] + 1.0) + style[:, 1] return x def get_inputs(): return [torch.rand([64, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'latent_size': 4, 'channels': 4, 'use_wscale': 1.0}]
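A plain-tensor restatement of StyleMod.forward above, with toy shapes of my own choosing: the linear output is split into a per-channel scale (biased toward the identity by the +1) and a per-channel shift.

import torch

B, C = 4, 4
x = torch.rand(B, C, 4, 4)
style = torch.rand(B, 2 * C)                  # stands in for self.lin(latent)
style = style.view(-1, 2, C, 1, 1)            # shape = [-1, 2, C] + [1] * (x.dim() - 2)
out = x * (style[:, 0] + 1.0) + style[:, 1]   # modulate, then shift
print(out.shape)                              # torch.Size([4, 4, 4, 4])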
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 4 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x1 + 8 * x2), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (4 + x1 + 8 * x2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr2 + (4 + x1), None, eviction_policy='evict_last') tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tmp1 + tmp4 tmp6 = tmp5 + tmp3 tmp7 = tmp0 * tmp6 tmp10 = tmp9 * tmp3 tmp11 = tmp8 + tmp10 tmp12 = tmp7 + tmp11 tl.store(out_ptr0 + x3, tmp12, None) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (8,), (1,)) assert_size_stride(primals_2, (8, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(32)](primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 8), (1, 4), 0), out=buf1) del buf0 buf2 = empty_strided_cuda((64, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_1[grid(4096)](primals_4, buf1, primals_1, buf2, 4096, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_1 return buf2, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class MyLinear(nn.Module): """Linear layer with equalized learning rate and custom learning rate multiplier.""" def __init__(self, input_size, output_size, gain=2 ** 0.5, use_wscale= False, lrmul=1, bias=True): super().__init__() he_std = gain * input_size ** -0.5 if use_wscale: init_std = 1.0 / lrmul self.w_mul = he_std * lrmul else: init_std = he_std / lrmul self.w_mul = lrmul self.weight = torch.nn.Parameter(torch.randn(output_size, input_size) * init_std) if bias: self.bias = torch.nn.Parameter(torch.zeros(output_size)) self.b_mul = lrmul else: self.bias = None def forward(self, x): bias = self.bias if bias is not None: bias = bias * self.b_mul return F.linear(x, self.weight * self.w_mul, bias) class StyleModNew(nn.Module): def __init__(self, latent_size, channels, use_wscale): super(StyleModNew, self).__init__() self.lin = MyLinear(latent_size, channels * 2, gain=1.0, use_wscale =use_wscale) def forward(self, input_0, 
input_1): primals_2 = self.lin.weight primals_1 = self.lin.bias primals_4 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Qingyang-Xu/GANInversion_with_ConsecutiveImgs
StyleMod
false
8692
[ "MIT" ]
23
9078a48ec3474dacdd02693b051e3addef1c5697
https://github.com/Qingyang-Xu/GANInversion_with_ConsecutiveImgs/tree/9078a48ec3474dacdd02693b051e3addef1c5697
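The equalized-learning-rate trick in MyLinear above stores the weight at unit scale and applies he_std at runtime, so either path yields the same effective weight scale. A small demonstration of my own (lrmul = 1):

import torch

input_size, output_size, gain = 512, 256, 2 ** 0.5
he_std = gain * input_size ** -0.5                         # ~0.0625

w_plain = torch.randn(output_size, input_size) * he_std    # use_wscale=False path
w_stored = torch.randn(output_size, input_size)            # use_wscale=True: stored at std 1
w_runtime = w_stored * he_std                              # w_mul applied in forward()

print(w_plain.std().item(), w_runtime.std().item())        # both ~0.0625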
DC
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/kq/ckqglep2uxxmtpvso6erxkbhcljhsx4d2chzqc2ps72gtorqejiz.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten._softmax] # Source node to ATen node mapping: # output => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = 
xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/p4/cp4k2x7bxxn5ihiymyq4rj3sw2wwu5gz2bpvj6zy2rbnipmq3a6g.py # Topologically Sorted Source Nodes: [output, mul, intersect, sum_2, sum_3], Original ATen: [aten._softmax, aten.mul, aten.sum] # Source node to ATen node mapping: # intersect => sum_2 # mul => mul # output => div, sum_1 # sum_2 => sum_3 # sum_3 => sum_4 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %arg1_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2, 3]), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%div, [2, 3]), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg1_1, [2, 3]), kwargs = {}) triton_per_fused__softmax_mul_sum_1 = async_compile.triton('triton_per_fused__softmax_mul_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 3, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_mul_sum_1(in_ptr0, in_ptr1, out_ptr1, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, 
None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (r2 + (16*x3)), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (r2 + (64*x1)), xmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr0 + (16 + r2 + (64*x1)), xmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr0 + (32 + r2 + (64*x1)), xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (48 + r2 + (64*x1)), xmask, eviction_policy='evict_last', other=0.0) tmp9 = tl.load(in_ptr1 + (r2 + (16*x3)), xmask, other=0.0) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp10 = tmp8 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp21 = tl.where(xmask, tmp19, 0) tmp22 = tl.sum(tmp21, 1)[:, None] tl.store(out_ptr1 + (x3), tmp14, xmask) tl.store(out_ptr2 + (x3), tmp18, xmask) tl.store(out_ptr3 + (x3), tmp22, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/yh/cyhah6vortjcnxg36l2dmgozizwveauhipphucmub4yj4cnfvaeb.py # Topologically Sorted Source Nodes: [mul_1, add, add_1, dice, mean, result], Original ATen: [aten.mul, aten.add, aten.div, aten.mean, aten.rsub] # Source node to ATen node mapping: # add => add # add_1 => add_1 # dice => div_1 # mean => mean # mul_1 => mul_1 # result => sub_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, 2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_3, %sum_4), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 1e-19), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mean), kwargs = {}) triton_per_fused_add_div_mean_mul_rsub_2 = async_compile.triton('triton_per_fused_add_div_mean_mul_rsub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_rsub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_mul_rsub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp4 = tl.load(in_ptr2 + (r0), None) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp5 = tmp3 + tmp4 tmp6 = 1e-19 tmp7 = tmp5 + tmp6 tmp8 = tmp2 / tmp7 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp12 = 16.0 tmp13 = tmp11 / tmp12 tmp14 = 1.0 tmp15 = tmp14 - tmp13 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp15, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output, mul, intersect, sum_2, sum_3], Original ATen: [aten._softmax, aten.mul, aten.sum] triton_per_fused__softmax_mul_sum_1.run(buf0, arg1_1, buf2, buf3, buf4, 16, 16, grid=grid(16), stream=stream0) del arg1_1 del buf0 buf5 = empty_strided_cuda((), (), torch.float32) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [mul_1, add, add_1, dice, mean, result], Original ATen: [aten.mul, aten.add, aten.div, aten.mean, aten.rsub] triton_per_fused_add_div_mean_mul_rsub_2.run(buf6, buf2, buf3, buf4, 1, 16, grid=grid(1), stream=stream0) del buf2 del buf3 del buf4 return (buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.nn.functional


class DC(nn.Module):

    def __init__(self, nb_classes):
        super(DC, self).__init__()
        self.softmax = nn.Softmax(1)
        self.nb_classes = nb_classes

    @staticmethod
    def onehot(gt, shape):
        gt = gt.long()
        y_onehot = torch.zeros(shape)
        y_onehot.scatter_(1, gt, 1)
        return y_onehot

    def reshape(self, output, target):
        if not all([(i == j) for i, j in zip(output.shape, target.shape)]):
            target = self.onehot(target, output.shape)
        target = target.permute(0, 2, 3, 4, 1)
        output = output.permute(0, 2, 3, 4, 1)
        return output, target

    def dice(self, output, target):
        output = self.softmax(output)
        if not all([(i == j) for i, j in zip(output.shape, target.shape)]):
            target = self.onehot(target, output.shape)
        sum_axis = list(range(2, len(target.shape)))
        s = 1e-19
        intersect = torch.sum(output * target, sum_axis)
        dice = 2 * intersect / (torch.sum(output, sum_axis) + torch.sum(
            target, sum_axis) + s)
        return 1.0 - dice.mean()

    def forward(self, output, target):
        result = self.dice(output, target)
        return result


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'nb_classes': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn import torch.nn.functional assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_per_fused__softmax_mul_sum_1(in_ptr0, in_ptr1, out_ptr1, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (r2 + 64 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp2 = tl.load(in_ptr0 + (16 + r2 + 64 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp4 = tl.load(in_ptr0 + (32 + r2 + 64 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (48 + r2 + 64 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp9 = tl.load(in_ptr1 + (r2 + 16 * x3), xmask, other=0.0) tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp10 = tmp8 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp21 = tl.where(xmask, tmp19, 0) tmp22 = tl.sum(tmp21, 1)[:, None] tl.store(out_ptr1 + x3, tmp14, xmask) tl.store(out_ptr2 + x3, tmp18, xmask) tl.store(out_ptr3 + x3, tmp22, xmask) @triton.jit def triton_per_fused_add_div_mean_mul_rsub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp4 = tl.load(in_ptr2 + r0, None) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp5 = tmp3 + tmp4 tmp6 = 1e-19 tmp7 = tmp5 + tmp6 tmp8 = tmp2 / tmp7 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp12 = 16.0 tmp13 = tmp11 / tmp12 tmp14 = 1.0 tmp15 = tmp14 - tmp13 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 
        0, tl.int32), tmp15, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        del arg0_1
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_per_fused__softmax_mul_sum_1[grid(16)](buf0, arg1_1, buf2,
            buf3, buf4, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
        del arg1_1
        del buf0
        buf5 = empty_strided_cuda((), (), torch.float32)
        buf6 = buf5
        del buf5
        triton_per_fused_add_div_mean_mul_rsub_2[grid(1)](buf6, buf2, buf3,
            buf4, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
        del buf2
        del buf3
        del buf4
    return buf6,


class DCNew(nn.Module):

    def __init__(self, nb_classes):
        super(DCNew, self).__init__()
        self.softmax = nn.Softmax(1)
        self.nb_classes = nb_classes

    @staticmethod
    def onehot(gt, shape):
        gt = gt.long()
        y_onehot = torch.zeros(shape)
        y_onehot.scatter_(1, gt, 1)
        return y_onehot

    def reshape(self, output, target):
        if not all([(i == j) for i, j in zip(output.shape, target.shape)]):
            target = self.onehot(target, output.shape)
        target = target.permute(0, 2, 3, 4, 1)
        output = output.permute(0, 2, 3, 4, 1)
        return output, target

    def dice(self, output, target):
        output = self.softmax(output)
        if not all([(i == j) for i, j in zip(output.shape, target.shape)]):
            target = self.onehot(target, output.shape)
        sum_axis = list(range(2, len(target.shape)))
        s = 1e-19
        intersect = torch.sum(output * target, sum_axis)
        dice = 2 * intersect / (torch.sum(output, sum_axis) + torch.sum(
            target, sum_axis) + s)
        return 1.0 - dice.mean()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
ReubenDo/InExtremIS
DC
false
8,693
[ "MIT" ]
17
1512ddf9b8c11c4d9f0ebd465d904ef3d539d350
https://github.com/ReubenDo/InExtremIS/tree/1512ddf9b8c11c4d9f0ebd465d904ef3d539d350
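# For reference: a minimal eager sketch of the Dice pipeline the three fused
# kernels above implement -- softmax over dim 1, per-(batch, class) spatial
# sums (the buf2/buf3/buf4 reductions), then the scalar
# 1 - mean(2 * intersect / (sum_probs + sum_target + eps)).  This is a sketch,
# not part of the generated record, and it assumes `target` already matches
# `output`'s shape so the onehot branch is skipped.
import torch


def soft_dice_loss(logits, target, eps=1e-19):
    probs = torch.softmax(logits, dim=1)
    dims = tuple(range(2, target.dim()))        # spatial axes, e.g. (2, 3)
    intersect = (probs * target).sum(dims)      # shape (N, C)
    denom = probs.sum(dims) + target.sum(dims) + eps
    return 1.0 - (2 * intersect / denom).mean()


print(soft_dice_loss(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)))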
ExponentialUpdate
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/hq/chqebm45grnegtc2csp7i7sfdlmozx4p545hmsw3m4uizihl4snf.py # Topologically Sorted Source Nodes: [mul, mul_1, add], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # add => add # mul => mul # mul_1 => mul_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, -3.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 4.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr1 + (x0), xmask) tmp1 = -3.0 tmp2 = tmp0 * tmp1 tmp4 = 4.0 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, mul_1, add], Original ATen: [aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import Tensor
from torch import nn
from torch.jit import Final


class ExponentialUpdate(nn.Module):
    alpha: 'Final[float]'

    def __init__(self, alpha: 'float'):
        super().__init__()
        self.alpha = float(alpha)

    def forward(self, x: 'Tensor', state: 'Tensor') -> Tensor:
        return x * (1 - self.alpha) + state * self.alpha


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'alpha': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torch.jit import Final
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp3 = tl.load(in_ptr1 + x0, xmask)
    tmp1 = -3.0
    tmp2 = tmp0 * tmp1
    tmp4 = 4.0
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tl.store(out_ptr0 + x0, tmp6, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_0[grid(256)](arg0_1, arg1_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class ExponentialUpdateNew(nn.Module):
    alpha: 'Final[float]'

    def __init__(self, alpha: 'float'):
        super().__init__()
        self.alpha = float(alpha)

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
Rikorose/clc-dns-challenge-2020
ExponentialUpdate
false
8,694
[ "Apache-2.0" ]
12
4f1c078691327a75b3a338fe372ba356b450a6da
https://github.com/Rikorose/clc-dns-challenge-2020/tree/4f1c078691327a75b3a338fe372ba356b450a6da
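# For reference: with alpha = float(4), the expression
# x * (1 - alpha) + state * alpha constant-folds to x * -3.0 + state * 4.0,
# which is exactly the pair of literals (tmp1/tmp4) baked into
# triton_poi_fused_add_mul_0 above.  A minimal sketch of that folding:
import torch

x, state = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
alpha = 4.0
eager = x * (1 - alpha) + state * alpha
folded = x * -3.0 + state * 4.0  # the constants hard-coded in the kernel
assert torch.allclose(eager, folded)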
Network
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/v2/cv24vzifrszeh6ykktpm44yxw6ipfsqq636xos3qcxnfqk5ld5ed.py # Topologically Sorted Source Nodes: [hidden_neurons], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # hidden_neurons => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1920 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel 
x2 = xindex x0 = xindex % 30 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (30, 4), (4, 1)) assert_size_stride(primals_2, (30, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 30), (30, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 30), (30, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 30), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 30), (480, 120, 30, 1), 0); del buf0 # reuse buf3 = empty_strided_cuda((4, 4, 4, 30), (480, 120, 30, 1), torch.bool) # Topologically Sorted Source Nodes: [hidden_neurons], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 1920, grid=grid(1920), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [q_values], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 30), (30, 1), 0), reinterpret_tensor(primals_4, (30, 4), (1, 30), 0), alpha=1, beta=1, out=buf2) del primals_5 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 30), (30, 1), 0), primals_4, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((30, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((30, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 30), (30, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class Network(nn.Module): def __init__(self, input_size, number_of_actions): super(Network, self).__init__() self.input_size = input_size self.number_of_actions = number_of_actions self.full_connection1 = nn.Linear(input_size, 30) self.full_connection2 = nn.Linear(30, number_of_actions) def forward(self, state): hidden_neurons = F.relu(self.full_connection1(state)) q_values = self.full_connection2(hidden_neurons) return q_values def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'number_of_actions': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1920 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 30 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (30, 4), (4, 1)) assert_size_stride(primals_2, (30,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 30), (30, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 30), (30, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 30), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 30), (480, 120, 30, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 30), (480, 120, 30, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(1920)](buf1, primals_2, buf3, 1920, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 30), (30, 1), 0), reinterpret_tensor(primals_4, (30, 4), (1, 30), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 30), (30, 1), 0), primals_4, buf3 class NetworkNew(nn.Module): def __init__(self, input_size, number_of_actions): super(NetworkNew, self).__init__() self.input_size = input_size self.number_of_actions = number_of_actions self.full_connection1 = nn.Linear(input_size, 30) self.full_connection2 = nn.Linear(30, number_of_actions) def forward(self, input_0): primals_1 = self.full_connection1.weight primals_2 = self.full_connection1.bias primals_4 = self.full_connection2.weight primals_5 = self.full_connection2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Radu-Raicea/self-driving-car-ai
Network
false
8,695
[ "MIT" ]
16
cf2b42472f7e78dd3bd530c0c7cd547988a8b0d2
https://github.com/Radu-Raicea/self-driving-car-ai/tree/cf2b42472f7e78dd3bd530c0c7cd547988a8b0d2
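# For reference: nn.Linear on a 4-D input acts on the trailing dim.  The
# compiled wrapper above does the same thing by viewing (4, 4, 4, 4) as
# (64, 4) with reinterpret_tensor, running mm/addmm, and viewing the result
# back to 4-D.  A minimal sketch of that equivalence:
import torch
from torch import nn

fc = nn.Linear(4, 30)
x = torch.rand(4, 4, 4, 4)
flat = fc(x.reshape(64, 4)).reshape(4, 4, 4, 30)
assert torch.allclose(fc(x), flat, atol=1e-6)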
GatedPooling1
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/2u/c2udrypr3ijcffpewjzpng56qrgmliz5n5yvcl52crrm77htfjsq.py # Topologically Sorted Source Nodes: [stack, alpha, max_pool2d, mul, sub, avg_pool2d, mul_1, add], Original ATen: [aten.stack, aten.sigmoid, aten.max_pool2d_with_indices, aten.mul, aten.rsub, aten.avg_pool2d, aten.add] # Source node to ATen node mapping: # add => add # alpha => sigmoid # avg_pool2d => avg_pool2d # max_pool2d => getitem_4 # mul => mul # mul_1 => mul_1 # stack => cat # sub => sub # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%squeeze, %squeeze_1, %squeeze_2, %squeeze_3], 1), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view,), kwargs = {}) # %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %getitem_4), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%primals_1, [4, 4], [4, 4]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %avg_pool2d), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_mul_rsub_sigmoid_stack_0 = async_compile.triton('triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_mul_rsub_sigmoid_stack_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 
'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_mul_rsub_sigmoid_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 21, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_mul_rsub_sigmoid_stack_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp6 = tl.load(in_ptr1 + (0)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp37 = tl.load(in_ptr5 + (16*x2), xmask, eviction_policy='evict_last') tmp38 = tl.load(in_ptr5 + (1 + (16*x2)), xmask, eviction_policy='evict_last') tmp40 = tl.load(in_ptr5 + (2 + (16*x2)), xmask, eviction_policy='evict_last') tmp42 = tl.load(in_ptr5 + (3 + (16*x2)), xmask, eviction_policy='evict_last') tmp44 = tl.load(in_ptr5 + (4 + (16*x2)), xmask, eviction_policy='evict_last') tmp46 = tl.load(in_ptr5 + (5 + (16*x2)), xmask, eviction_policy='evict_last') tmp48 = tl.load(in_ptr5 + (6 + (16*x2)), xmask, eviction_policy='evict_last') tmp50 = tl.load(in_ptr5 + (7 + (16*x2)), xmask, eviction_policy='evict_last') tmp52 = tl.load(in_ptr5 + (8 + (16*x2)), xmask, eviction_policy='evict_last') tmp54 = tl.load(in_ptr5 + (9 + (16*x2)), xmask, eviction_policy='evict_last') tmp56 = tl.load(in_ptr5 + (10 + (16*x2)), xmask, eviction_policy='evict_last') tmp58 = tl.load(in_ptr5 + (11 + (16*x2)), xmask, eviction_policy='evict_last') tmp60 = tl.load(in_ptr5 + (12 + (16*x2)), xmask, eviction_policy='evict_last') tmp62 = tl.load(in_ptr5 + (13 + (16*x2)), xmask, eviction_policy='evict_last') tmp64 = tl.load(in_ptr5 + (14 + (16*x2)), xmask, eviction_policy='evict_last') tmp66 = tl.load(in_ptr5 + (15 + (16*x2)), xmask, eviction_policy='evict_last') tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp5 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tmp12 = tl.full([1], 2, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x1), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp15 + tmp7 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp14, tmp16, tmp17) tmp19 = tmp0 >= tmp12 tmp20 = tl.full([1], 3, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp23 = tl.load(in_ptr3 + (x1), tmp22 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tmp23 + tmp7 tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp22, tmp24, 
tmp25) tmp27 = tmp0 >= tmp20 tmp28 = tl.full([1], 4, tl.int64) tmp29 = tmp0 < tmp28 tmp30 = tl.load(in_ptr4 + (x1), tmp27 & xmask, eviction_policy='evict_last', other=0.0) tmp31 = tmp30 + tmp7 tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype) tmp33 = tl.where(tmp27, tmp31, tmp32) tmp34 = tl.where(tmp22, tmp26, tmp33) tmp35 = tl.where(tmp14, tmp18, tmp34) tmp36 = tl.where(tmp4, tmp10, tmp35) tmp39 = triton_helpers.maximum(tmp38, tmp37) tmp41 = triton_helpers.maximum(tmp40, tmp39) tmp43 = triton_helpers.maximum(tmp42, tmp41) tmp45 = triton_helpers.maximum(tmp44, tmp43) tmp47 = triton_helpers.maximum(tmp46, tmp45) tmp49 = triton_helpers.maximum(tmp48, tmp47) tmp51 = triton_helpers.maximum(tmp50, tmp49) tmp53 = triton_helpers.maximum(tmp52, tmp51) tmp55 = triton_helpers.maximum(tmp54, tmp53) tmp57 = triton_helpers.maximum(tmp56, tmp55) tmp59 = triton_helpers.maximum(tmp58, tmp57) tmp61 = triton_helpers.maximum(tmp60, tmp59) tmp63 = triton_helpers.maximum(tmp62, tmp61) tmp65 = triton_helpers.maximum(tmp64, tmp63) tmp67 = triton_helpers.maximum(tmp66, tmp65) tmp68 = tmp38 + tmp37 tmp69 = tmp40 + tmp68 tmp70 = tmp42 + tmp69 tmp71 = tmp44 + tmp70 tmp72 = tmp46 + tmp71 tmp73 = tmp48 + tmp72 tmp74 = tmp50 + tmp73 tmp75 = tmp52 + tmp74 tmp76 = tmp54 + tmp75 tmp77 = tmp56 + tmp76 tmp78 = tmp58 + tmp77 tmp79 = tmp60 + tmp78 tmp80 = tmp62 + tmp79 tmp81 = tmp64 + tmp80 tmp82 = tmp66 + tmp81 tmp83 = 0.0625 tmp84 = tmp82 * tmp83 tmp85 = tl.sigmoid(tmp36) tmp86 = tmp85 * tmp67 tmp87 = 1.0 tmp88 = tmp87 - tmp85 tmp89 = tmp88 * tmp84 tmp90 = tmp86 + tmp89 tl.store(out_ptr0 + (x2), tmp36, xmask) tl.store(out_ptr1 + (x2), tmp67, xmask) tl.store(out_ptr2 + (x2), tmp84, xmask) tl.store(out_ptr3 + (x2), tmp90, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_3, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 0, 4, 1), 0), primals_2, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 1, 1), (1, 1, 1, 1)) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 0, 4, 1), 16), primals_2, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 1, 1), (1, 1, 1, 1)) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 0, 4, 1), 32), primals_2, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 1, 1), (1, 1, 1, 1)) # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 0, 4, 1), 48), primals_2, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 1, 1, 1), (1, 1, 1, 1)) buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 1), 
torch.float32) buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [stack, alpha, max_pool2d, mul, sub, avg_pool2d, mul_1, add], Original ATen: [aten.stack, aten.sigmoid, aten.max_pool2d_with_indices, aten.mul, aten.rsub, aten.avg_pool2d, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_mul_rsub_sigmoid_stack_0.run(buf0, primals_3, buf1, buf2, buf3, primals_1, buf4, buf5, buf6, buf7, 16, grid=grid(16), stream=stream0) del buf0 del buf1 del buf2 del buf3 del primals_3 return (buf7, primals_2, reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 16, 4, 1), 16), reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 16, 4, 1), 32), reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 16, 4, 1), 48), buf4, buf5, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class GatedPooling1(nn.Module): """ Gated pooling as defined in https://arxiv.org/abs/1509.08985 This implementation is the L variant ( entire layer, one parameter ) """ def __init__(self, kernel_size): super(GatedPooling1, self).__init__() self.avgpool = nn.AvgPool2d(kernel_size) self.maxpool = nn.MaxPool2d(kernel_size) self.transform = nn.Conv2d(1, 1, kernel_size=kernel_size, stride= kernel_size) torch.nn.init.kaiming_normal_(self.transform.weight) def forward(self, x): xs = [self.transform(x_filt.unsqueeze(1)).squeeze(1) for x_filt in torch.unbind(x, dim=1)] alpha = torch.sigmoid(torch.stack(xs, 1)) return alpha * self.maxpool(x) + (1 - alpha) * self.avgpool(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_mul_rsub_sigmoid_stack_0( in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp37 = tl.load(in_ptr5 + 16 * x2, xmask, eviction_policy='evict_last') tmp38 = tl.load(in_ptr5 + (1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp40 = tl.load(in_ptr5 + (2 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp42 = tl.load(in_ptr5 + (3 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp44 = tl.load(in_ptr5 + (4 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp46 = tl.load(in_ptr5 + (5 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp48 = tl.load(in_ptr5 + (6 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp50 = tl.load(in_ptr5 + (7 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp52 = tl.load(in_ptr5 + (8 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp54 = tl.load(in_ptr5 + (9 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp56 = tl.load(in_ptr5 + (10 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp58 = tl.load(in_ptr5 + (11 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp60 = tl.load(in_ptr5 + (12 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp62 = tl.load(in_ptr5 + (13 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp64 = tl.load(in_ptr5 + (14 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp66 = tl.load(in_ptr5 + (15 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp5 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tmp12 = tl.full([1], 2, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + x1, tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp16 = tmp15 + tmp7 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp14, tmp16, tmp17) tmp19 = tmp0 >= tmp12 tmp20 = tl.full([1], 3, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp23 = tl.load(in_ptr3 + x1, tmp22 & xmask, eviction_policy= 'evict_last', other=0.0) tmp24 = tmp23 + tmp7 tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp22, tmp24, tmp25) tmp27 = tmp0 >= tmp20 tl.full([1], 4, tl.int64) tmp30 = tl.load(in_ptr4 + x1, tmp27 & xmask, eviction_policy= 'evict_last', other=0.0) tmp31 = tmp30 + tmp7 tmp32 = tl.full(tmp31.shape, 0.0, tmp31.dtype) tmp33 = tl.where(tmp27, tmp31, tmp32) tmp34 = tl.where(tmp22, tmp26, tmp33) tmp35 = tl.where(tmp14, tmp18, tmp34) tmp36 = tl.where(tmp4, tmp10, tmp35) tmp39 = triton_helpers.maximum(tmp38, tmp37) tmp41 = triton_helpers.maximum(tmp40, tmp39) tmp43 = triton_helpers.maximum(tmp42, 
tmp41) tmp45 = triton_helpers.maximum(tmp44, tmp43) tmp47 = triton_helpers.maximum(tmp46, tmp45) tmp49 = triton_helpers.maximum(tmp48, tmp47) tmp51 = triton_helpers.maximum(tmp50, tmp49) tmp53 = triton_helpers.maximum(tmp52, tmp51) tmp55 = triton_helpers.maximum(tmp54, tmp53) tmp57 = triton_helpers.maximum(tmp56, tmp55) tmp59 = triton_helpers.maximum(tmp58, tmp57) tmp61 = triton_helpers.maximum(tmp60, tmp59) tmp63 = triton_helpers.maximum(tmp62, tmp61) tmp65 = triton_helpers.maximum(tmp64, tmp63) tmp67 = triton_helpers.maximum(tmp66, tmp65) tmp68 = tmp38 + tmp37 tmp69 = tmp40 + tmp68 tmp70 = tmp42 + tmp69 tmp71 = tmp44 + tmp70 tmp72 = tmp46 + tmp71 tmp73 = tmp48 + tmp72 tmp74 = tmp50 + tmp73 tmp75 = tmp52 + tmp74 tmp76 = tmp54 + tmp75 tmp77 = tmp56 + tmp76 tmp78 = tmp58 + tmp77 tmp79 = tmp60 + tmp78 tmp80 = tmp62 + tmp79 tmp81 = tmp64 + tmp80 tmp82 = tmp66 + tmp81 tmp83 = 0.0625 tmp84 = tmp82 * tmp83 tmp85 = tl.sigmoid(tmp36) tmp86 = tmp85 * tmp67 tmp87 = 1.0 tmp88 = tmp87 - tmp85 tmp89 = tmp88 * tmp84 tmp90 = tmp86 + tmp89 tl.store(out_ptr0 + x2, tmp36, xmask) tl.store(out_ptr1 + x2, tmp67, xmask) tl.store(out_ptr2 + x2, tmp84, xmask) tl.store(out_ptr3 + x2, tmp90, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 0, 4, 1), 0), primals_2, stride=(4, 4), padding= (0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0 ), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 1, 1), (1, 1, 1, 1)) buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 0, 4, 1), 16), primals_2, stride=(4, 4), padding =(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 1, 1), (1, 1, 1, 1)) buf2 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 0, 4, 1), 32), primals_2, stride=(4, 4), padding =(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 1, 1), (1, 1, 1, 1)) buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 0, 4, 1), 48), primals_2, stride=(4, 4), padding =(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 1, 1, 1), (1, 1, 1, 1)) buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_mul_rsub_sigmoid_stack_0[ grid(16)](buf0, primals_3, buf1, buf2, buf3, primals_1, buf4, buf5, buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 del buf1 del buf2 del buf3 del primals_3 return buf7, primals_2, reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 16, 4, 1), 16), reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 16, 4, 1), 32), reinterpret_tensor(primals_1, (4, 1, 4, 4), (64, 16, 4, 1), 48 ), buf4, buf5, buf6 class GatedPooling1New(nn.Module): """ Gated pooling as defined in https://arxiv.org/abs/1509.08985 This implementation is the L variant ( 
entire layer, one parameter ) """ def __init__(self, kernel_size): super(GatedPooling1New, self).__init__() self.avgpool = nn.AvgPool2d(kernel_size) self.maxpool = nn.MaxPool2d(kernel_size) self.transform = nn.Conv2d(1, 1, kernel_size=kernel_size, stride= kernel_size) torch.nn.init.kaiming_normal_(self.transform.weight) def forward(self, input_0): primals_2 = self.transform.weight primals_3 = self.transform.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
RicherMans/Dcase2018_pooling
GatedPooling1
false
8,696
[ "Apache-2.0" ]
13
10540502bba7215a1ba157614b39fedecb079d9b
https://github.com/RicherMans/Dcase2018_pooling/tree/10540502bba7215a1ba157614b39fedecb079d9b
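# For reference: gated pooling mixes max and average pooling through a
# learned, input-dependent gate,
# out = sigmoid(f(x)) * maxpool(x) + (1 - sigmoid(f(x))) * avgpool(x).
# A minimal eager sketch of the 'L' variant above (one conv shared across
# channels, applied to each channel slice as in the unbind/stack in forward):
import torch
from torch import nn

x = torch.rand(4, 4, 4, 4)
transform = nn.Conv2d(1, 1, kernel_size=4, stride=4)
gates = torch.stack([transform(c.unsqueeze(1)).squeeze(1) for c in
    torch.unbind(x, dim=1)], dim=1)
alpha = torch.sigmoid(gates)                                 # (4, 4, 1, 1)
out = alpha * nn.MaxPool2d(4)(x) + (1 - alpha) * nn.AvgPool2d(4)(x)
print(out.shape)  # torch.Size([4, 4, 1, 1])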
Actor
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/cn/ccnvkf7kfnskbbfy2kwx55oghjftngamwdttghryrfs4g3fay72l.py # Topologically Sorted Source Nodes: [a], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # a => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = 
xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/zt/cztuojq6lirzwvskmwlxfnort5k5gctqpastb7irgmkdeensys6e.py # Topologically Sorted Source Nodes: [tanh, mul], Original ATen: [aten.tanh, aten.mul] # Source node to ATen node mapping: # mul => mul # tanh => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, 4), kwargs = {}) triton_poi_fused_mul_tanh_1 = async_compile.triton('triton_poi_fused_mul_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = 4.0 tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (256, 256), (256, 1)) assert_size_stride(primals_5, (256, ), (1, )) assert_size_stride(primals_6, (4, 256), (256, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = 
reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf0 # reuse buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) # Topologically Sorted Source Nodes: [a], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 16384, grid=grid(16384), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf2 # reuse buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) # Topologically Sorted Source Nodes: [a_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf6, 16384, grid=grid(16384), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_6, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tanh, mul], Original ATen: [aten.tanh, aten.mul] triton_poi_fused_mul_tanh_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0) return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(buf3, (64, 256), (256, 1), 0), buf4, primals_6, buf6, primals_4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((256, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F def weight_init(m): """Custom weight init for Conv2D and Linear layers.""" if isinstance(m, nn.Linear): nn.init.orthogonal_(m.weight.data) m.bias.data.fill_(0.0) elif isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): assert m.weight.size(2) == m.weight.size(3) m.weight.data.fill_(0.0) m.bias.data.fill_(0.0) mid = m.weight.size(2) // 2 gain = nn.init.calculate_gain('relu') nn.init.orthogonal_(m.weight.data[:, :, mid, mid], gain) class Actor(nn.Module): def __init__(self, state_dim, action_dim, max_action, hidden_dim=256): super(Actor, self).__init__() self.l1 = nn.Linear(state_dim, hidden_dim) self.l2 = nn.Linear(hidden_dim, hidden_dim) self.l3 = nn.Linear(hidden_dim, action_dim) self.max_action = max_action self.apply(weight_init) def forward(self, state): a = F.relu(self.l1(state)) a = F.relu(self.l2(a)) return self.max_action * torch.tanh(self.l3(a)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_dim': 4, 'action_dim': 4, 'max_action': 4}]
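# Quick eager check (illustrative, CPU-only, not part of the record): the
# final tanh scaling bounds every action to [-max_action, max_action].
# Variable names are hypothetical.
import torch

actor = Actor(state_dim=4, action_dim=4, max_action=4)
action = actor(torch.rand(4, 4, 4, 4))
assert action.abs().max().item() <= 4.0   # |4 * tanh(.)| <= 4 everywhere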
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_mul_tanh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = 4.0 tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (256, 256), (256, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (4, 256), (256, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1, primals_2, buf7, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf2 buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf3, primals_5, buf6, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_6, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_tanh_1[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 256), (256, 1), 0 ), 
reinterpret_tensor(buf3, (64, 256), (256, 1), 0 ), buf4, primals_6, buf6, primals_4, buf7 def weight_init(m): """Custom weight init for Conv2D and Linear layers.""" if isinstance(m, nn.Linear): nn.init.orthogonal_(m.weight.data) m.bias.data.fill_(0.0) elif isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): assert m.weight.size(2) == m.weight.size(3) m.weight.data.fill_(0.0) m.bias.data.fill_(0.0) mid = m.weight.size(2) // 2 gain = nn.init.calculate_gain('relu') nn.init.orthogonal_(m.weight.data[:, :, mid, mid], gain) class ActorNew(nn.Module): def __init__(self, state_dim, action_dim, max_action, hidden_dim=256): super(ActorNew, self).__init__() self.l1 = nn.Linear(state_dim, hidden_dim) self.l2 = nn.Linear(hidden_dim, hidden_dim) self.l3 = nn.Linear(hidden_dim, action_dim) self.max_action = max_action self.apply(weight_init) def forward(self, input_0): primals_1 = self.l1.weight primals_2 = self.l1.bias primals_4 = self.l2.weight primals_5 = self.l2.bias primals_6 = self.l3.weight primals_7 = self.l3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
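# Illustrative parity sketch (assumes a CUDA device; not part of the record):
# ActorNew replays the captured inductor graph with the module's own weights,
# so after syncing state it should agree with the eager Actor up to
# floating-point reassociation.
import torch

eager = Actor(4, 4, 4).cuda()
compiled = ActorNew(4, 4, 4).cuda()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(eager(x), compiled(x), rtol=1e-5, atol=1e-5)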
LQNew/LWDRL
Actor
false
8,697
[ "MIT" ]
11
0e4fab077a0cfbd27590b840557f4fda033c74ff
https://github.com/LQNew/LWDRL/tree/0e4fab077a0cfbd27590b840557f4fda033c74ff
Conv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/wa/cwarb3rsaiknlg4svzdrngr6wzsdszxrjz5tgmmluw7cffxr7nms.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x => convolution # x_1 => gt, mul, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 9) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x3), tmp4, xmask) tl.store(out_ptr1 + (x3), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1)) buf1 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 144, grid=grid(144), stream=stream0) del buf0 del primals_2 return (buf2, primals_1, primals_3, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
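# Eager sketch of the convolution + leaky_relu fusion above (illustrative,
# not part of the record): the kernel emits both the LeakyReLU(0.2) value and
# the (x > 0) gate that the backward pass reuses.
import torch

x = torch.randn(4, 4, 3, 3)        # conv output shape, as in buf0 above
gate = x > 0                       # stored into buf1 (torch.bool)
y = torch.where(gate, x, 0.2 * x)  # stored into buf2; equals nn.LeakyReLU(0.2)(x)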
import torch import torch.nn as nn class Conv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, bn =False, activation='leakyrelu', dropout=False): super(Conv2d, self).__init__() padding = int((kernel_size - 1) / 2) self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding) self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0, affine=True) if bn else None self.dropout = nn.Dropout(p=0.5) if dropout else None if activation == 'leakyrelu': self.activation = nn.LeakyReLU(negative_slope=0.2) elif activation == 'relu': self.activation = nn.ReLU() elif activation == 'tanh': self.activation = nn.Tanh() else: raise ValueError('Not a valid activation, received {}'.format( activation)) def forward(self, x): x = self.conv(x) if self.bn is not None: x = self.bn(x) if self.dropout is not None: x = self.dropout(x) x = self.activation(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
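# Shape sanity check (illustrative, not part of the record): kernel_size=4
# gives padding = int((4 - 1) / 2) = 1, so H_out = (4 + 2*1 - 4) / 1 + 1 = 3,
# matching the (4, 4, 3, 3) buffers in the compiled graph.
import torch

m = Conv2d(in_channels=4, out_channels=4, kernel_size=4)
y = m(torch.rand(4, 4, 4, 4))
assert y.shape == (4, 4, 3, 3)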
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 9 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1)) buf1 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(144)](buf0, primals_2, buf1, buf2, 144, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 return buf2, primals_1, primals_3, buf1 class Conv2dNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, bn =False, activation='leakyrelu', dropout=False): super(Conv2dNew, self).__init__() padding = int((kernel_size - 1) / 2) self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding) self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0, affine=True) if bn else None self.dropout = nn.Dropout(p=0.5) if dropout else None if activation == 'leakyrelu': self.activation = nn.LeakyReLU(negative_slope=0.2) elif activation == 'relu': self.activation = nn.ReLU() elif activation == 'tanh': self.activation = nn.Tanh() else: raise ValueError('Not a valid activation, received {}'.format( activation)) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
RQuispeC/pytorch-ACSCP
Conv2d
false
8,698
[ "MIT" ]
25
c83f08632012c2245250ff9c5140814461db575c
https://github.com/RQuispeC/pytorch-ACSCP/tree/c83f08632012c2245250ff9c5140814461db575c
GatedPooling
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/fw/cfw3p53dnoaisg2fotxobujo3hcyrj245b6443ptshy5r65izoxh.py # Topologically Sorted Source Nodes: [conv2d, alpha, max_pool2d, mul, sub, avg_pool2d, mul_1, add], Original ATen: [aten.convolution, aten.sigmoid, aten.max_pool2d_with_indices, aten.mul, aten.rsub, aten.avg_pool2d, aten.add] # Source node to ATen node mapping: # add => add # alpha => sigmoid # avg_pool2d => avg_pool2d # conv2d => convolution # max_pool2d => getitem # mul => mul # mul_1 => mul_1 # sub => sub # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {}) # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %getitem), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%primals_3, [4, 4], [4, 4]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %avg_pool2d), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) triton_poi_fused_add_avg_pool2d_convolution_max_pool2d_with_indices_mul_rsub_sigmoid_0 = async_compile.triton('triton_poi_fused_add_avg_pool2d_convolution_max_pool2d_with_indices_mul_rsub_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 
'*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_avg_pool2d_convolution_max_pool2d_with_indices_mul_rsub_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 18, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_avg_pool2d_convolution_max_pool2d_with_indices_mul_rsub_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (16*x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (2 + (16*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (3 + (16*x2)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (4 + (16*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (5 + (16*x2)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (6 + (16*x2)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (7 + (16*x2)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (8 + (16*x2)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (9 + (16*x2)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (10 + (16*x2)), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr1 + (11 + (16*x2)), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr1 + (12 + (16*x2)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (13 + (16*x2)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr1 + (14 + (16*x2)), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr1 + (15 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp7 = triton_helpers.maximum(tmp6, tmp5) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp11 = triton_helpers.maximum(tmp10, tmp9) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp15 = triton_helpers.maximum(tmp14, tmp13) tmp17 = triton_helpers.maximum(tmp16, tmp15) tmp19 = triton_helpers.maximum(tmp18, tmp17) tmp21 = triton_helpers.maximum(tmp20, tmp19) tmp23 = triton_helpers.maximum(tmp22, tmp21) tmp25 = triton_helpers.maximum(tmp24, tmp23) tmp27 = triton_helpers.maximum(tmp26, tmp25) tmp29 = triton_helpers.maximum(tmp28, tmp27) tmp31 = triton_helpers.maximum(tmp30, tmp29) tmp33 = triton_helpers.maximum(tmp32, tmp31) tmp34 = tmp4 + tmp3 tmp35 = tmp6 + tmp34 tmp36 = tmp8 + tmp35 tmp37 = tmp10 + tmp36 tmp38 = tmp12 + tmp37 tmp39 = tmp14 + tmp38 tmp40 = tmp16 + tmp39 tmp41 = tmp18 + tmp40 
tmp42 = tmp20 + tmp41 tmp43 = tmp22 + tmp42 tmp44 = tmp24 + tmp43 tmp45 = tmp26 + tmp44 tmp46 = tmp28 + tmp45 tmp47 = tmp30 + tmp46 tmp48 = tmp32 + tmp47 tmp49 = 0.0625 tmp50 = tmp48 * tmp49 tmp51 = tl.sigmoid(tmp2) tmp52 = tmp51 * tmp33 tmp53 = 1.0 tmp54 = tmp53 - tmp51 tmp55 = tmp54 * tmp50 tmp56 = tmp52 + tmp55 tl.store(in_out_ptr0 + (x2), tmp2, xmask) tl.store(out_ptr0 + (x2), tmp33, xmask) tl.store(out_ptr1 + (x2), tmp50, xmask) tl.store(out_ptr2 + (x2), tmp56, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, alpha, max_pool2d, mul, sub, avg_pool2d, mul_1, add], Original ATen: [aten.convolution, aten.sigmoid, aten.max_pool2d_with_indices, aten.mul, aten.rsub, aten.avg_pool2d, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_avg_pool2d_convolution_max_pool2d_with_indices_mul_rsub_sigmoid_0.run(buf1, primals_2, primals_3, buf2, buf3, buf4, 16, grid=grid(16), stream=stream0) del primals_2 return (buf4, primals_1, primals_3, buf1, buf2, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
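# Worked constants from the kernel above (illustrative, not part of the
# record): the 4x4 pooling window holds 16 elements, so the average-pool
# scale is tmp49 = 1/16 = 0.0625, while the chain of triton_helpers.maximum
# calls realizes the 4x4 max pool over the same window.
import torch

x = torch.rand(4, 4, 4, 4)
avg = x.sum(dim=(2, 3), keepdim=True) * 0.0625
torch.testing.assert_close(avg, torch.nn.functional.avg_pool2d(x, 4))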
import torch import torch.nn as nn class GatedPooling(nn.Module): """ Gated pooling as defined in https://arxiv.org/abs/1509.08985 This implementation is the LR variant """ def __init__(self, kernel_size, filter): super(GatedPooling, self).__init__() self.avgpool = nn.AvgPool2d(kernel_size) self.maxpool = nn.MaxPool2d(kernel_size) self.transform = nn.Conv2d(filter, filter, kernel_size=kernel_size, stride=kernel_size) def forward(self, x): alpha = torch.sigmoid(self.transform(x)) return alpha * self.maxpool(x) + (1 - alpha) * self.avgpool(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4, 'filter': 4}]
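# Eager reference for the gating (illustrative, not part of the record): with
# a 4x4 window each (N, C) plane collapses to one value, interpolated between
# max and mean pooling by the learned gate alpha = sigmoid(transform(x)).
# The random alpha below is a hypothetical stand-in for that gate.
import torch

x = torch.rand(4, 4, 4, 4)
alpha = torch.rand(4, 4, 1, 1)     # stand-in for sigmoid(self.transform(x))
out = alpha * x.amax(dim=(2, 3), keepdim=True) + (1 - alpha) * x.mean(
    dim=(2, 3), keepdim=True)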
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_avg_pool2d_convolution_max_pool2d_with_indices_mul_rsub_sigmoid_0( in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 16 * x2, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 16 * x2), xmask, eviction_policy='evict_last' ) tmp6 = tl.load(in_ptr1 + (2 + 16 * x2), xmask, eviction_policy='evict_last' ) tmp8 = tl.load(in_ptr1 + (3 + 16 * x2), xmask, eviction_policy='evict_last' ) tmp10 = tl.load(in_ptr1 + (4 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (5 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + (6 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + (7 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr1 + (8 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr1 + (9 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr1 + (10 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp24 = tl.load(in_ptr1 + (11 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr1 + (12 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr1 + (13 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp30 = tl.load(in_ptr1 + (14 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr1 + (15 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp7 = triton_helpers.maximum(tmp6, tmp5) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp11 = triton_helpers.maximum(tmp10, tmp9) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp15 = triton_helpers.maximum(tmp14, tmp13) tmp17 = triton_helpers.maximum(tmp16, tmp15) tmp19 = triton_helpers.maximum(tmp18, tmp17) tmp21 = triton_helpers.maximum(tmp20, tmp19) tmp23 = triton_helpers.maximum(tmp22, tmp21) tmp25 = triton_helpers.maximum(tmp24, tmp23) tmp27 = triton_helpers.maximum(tmp26, tmp25) tmp29 = triton_helpers.maximum(tmp28, tmp27) tmp31 = triton_helpers.maximum(tmp30, tmp29) tmp33 = triton_helpers.maximum(tmp32, tmp31) tmp34 = tmp4 + tmp3 tmp35 = tmp6 + tmp34 tmp36 = tmp8 + tmp35 tmp37 = tmp10 + tmp36 tmp38 = tmp12 + tmp37 tmp39 = tmp14 + tmp38 tmp40 = tmp16 + tmp39 tmp41 = tmp18 + tmp40 tmp42 = tmp20 + tmp41 tmp43 = tmp22 + tmp42 tmp44 = tmp24 + tmp43 tmp45 = tmp26 + tmp44 tmp46 = tmp28 + tmp45 tmp47 = tmp30 + tmp46 tmp48 = tmp32 + tmp47 tmp49 = 0.0625 tmp50 = tmp48 * tmp49 tmp51 = tl.sigmoid(tmp2) tmp52 = tmp51 * tmp33 tmp53 = 1.0 tmp54 = tmp53 - tmp51 tmp55 = tmp54 * tmp50 tmp56 = tmp52 + tmp55 tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp33, xmask) tl.store(out_ptr1 + x2, tmp50, xmask) tl.store(out_ptr2 + x2, tmp56, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() 
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_avg_pool2d_convolution_max_pool2d_with_indices_mul_rsub_sigmoid_0[ grid(16)](buf1, primals_2, primals_3, buf2, buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf4, primals_1, primals_3, buf1, buf2, buf3 class GatedPoolingNew(nn.Module): """ Gated pooling as defined in https://arxiv.org/abs/1509.08985 This implementation is the LR variant """ def __init__(self, kernel_size, filter): super(GatedPoolingNew, self).__init__() self.avgpool = nn.AvgPool2d(kernel_size) self.maxpool = nn.MaxPool2d(kernel_size) self.transform = nn.Conv2d(filter, filter, kernel_size=kernel_size, stride=kernel_size) def forward(self, input_0): primals_1 = self.transform.weight primals_2 = self.transform.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
RicherMans/Dcase2018_pooling
GatedPooling
false
8,699
[ "Apache-2.0" ]
13
10540502bba7215a1ba157614b39fedecb079d9b
https://github.com/RicherMans/Dcase2018_pooling/tree/10540502bba7215a1ba157614b39fedecb079d9b
StaticArchGenerator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/y2/cy25s5y43zkbgrbnvhjujpfwhgrcmw47z3si4uiyfobzuys3jnse.py # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_1,), kwargs = {}) triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, = args args.clear() 
assert_size_stride(primals_1, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] stream0 = get_raw_stream(0) triton_poi_fused_sigmoid_0.run(primals_1, buf0, 4, grid=grid(4), stream=stream0) del primals_1 return (buf0, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn import torch.nn.init as weight_init from torch.nn import Parameter class ArchSampler(nn.Module): def __init__(self, distrib_dim, all_same, deter_eval, var_names=None, * args, **kwargs): super().__init__() self.distrib_dim = distrib_dim self.all_same = all_same self.deter_eval = deter_eval self.frozen = False self.log_probas = None self.distrib_entropies = None self._seq_probas = None if var_names is not None: assert len(var_names) == self.distrib_dim self.var_names = var_names def freeze(self): self.frozen = True def start_new_sequence(self): self.log_probas = [] self.distrib_entropies = [] self._seq_probas = [] def nodes_to_prune(self, treshold): nodes = [] for node, weight in zip(self.var_names, self().squeeze().unbind()): if weight < treshold: nodes.append(node) return nodes def sample_archs(self, batch_size, probas): """ Hook called by pytorch before each forward :param: Current module :param input: Input given to the module's forward :return: """ self._check_probas(probas, self.all_same) if probas.size(0) != batch_size: if probas.size(0) != 1: raise ValueError( "Sampling probabilities dimensions {} doesn't match with batch size {}." .format(probas.size(), batch_size)) if not self.all_same: probas = probas.expand(batch_size, -1) distrib = torch.distributions.Bernoulli(probas) if not self.training and self.deter_eval: samplings = (probas > 0.5).float() else: samplings = distrib.sample() if self.all_same: samplings = samplings.expand(batch_size, -1) self._seq_probas.append(probas) self.distrib_entropies.append(distrib.entropy()) self.log_probas.append(distrib.log_prob(samplings)) return samplings def _check_probas(self, probas, all_same): """ :param probas: B_size*N_nodes Tensor containing the probability of each arch being sampled in the nex forward. :param all_same: if True, the same sampling will be used for the whole batch in the next forward. :return: """ if probas.dim() != 2 or all_same and probas.size(0) != 1: raise ValueError( 'probas params has wrong dimension: {} (all_same={})'. format(probas.size(), all_same)) if probas.size(-1) != self.distrib_dim: raise ValueError( 'Should have exactly as many probas as the number of stochastic nodes({}), got {} instead.' 
.format(self.distrib_dim, probas.size(-1))) @property def last_arch_probas(self): return self.probas @property def last_sequence_probas(self): """ :return: The probabilities of each arch for the last sequence in format (seq_len*batch_size*n_sampling_params) """ return torch.stack(self._seq_probas) class StaticArchGenerator(ArchSampler): def __init__(self, initial_p, *args, **kwargs): super().__init__(*args, **kwargs) self.params = Parameter(torch.Tensor(1, self.distrib_dim)) logit = np.log(initial_p / (1 - initial_p) ) if initial_p < 1 else float('inf') weight_init.constant_(self.params, logit) def forward(self, z=None): if self.frozen and self.training: raise RuntimeError( 'Trying to sample from a frozen distrib gen in training mode') return self.params.sigmoid() def entropy(self): distrib = torch.distributions.Bernoulli(self.params.sigmoid()) return distrib.entropy() def remove_var(self, name): assert self.var_names self.distrib_dim -= 1 remove_idx = self.var_names.index(name) self.var_names.remove(name) all_idx = torch.ones_like(self.params).bool() all_idx[0, remove_idx] = 0 self.params = nn.Parameter(self.params[all_idx].unsqueeze(0)) def is_deterministic(self): distrib = self() return torch.equal(distrib, distrib ** 2) def get_inputs(): return [] def get_init_inputs(): return [[], {'initial_p': 4, 'distrib_dim': 4, 'all_same': 4, 'deter_eval': 4}]
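# Sanity sketch for the logit initialisation above (illustrative, not part of
# the record): sigmoid(log(p / (1 - p))) == p for p in (0, 1), and the
# p >= 1 branch pins the logit to +inf, i.e. a sampling probability of
# exactly 1.
import numpy as np
import torch

p = 0.75
logit = np.log(p / (1 - p))
assert abs(torch.sigmoid(torch.tensor(logit)).item() - p) < 1e-6
assert torch.sigmoid(torch.tensor(float('inf'))).item() == 1.0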
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.nn as nn import torch.nn.init as weight_init from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): primals_1, = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sigmoid_0[grid(4)](primals_1, buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_1 return buf0, buf0 class ArchSampler(nn.Module): def __init__(self, distrib_dim, all_same, deter_eval, var_names=None, * args, **kwargs): super().__init__() self.distrib_dim = distrib_dim self.all_same = all_same self.deter_eval = deter_eval self.frozen = False self.log_probas = None self.distrib_entropies = None self._seq_probas = None if var_names is not None: assert len(var_names) == self.distrib_dim self.var_names = var_names def freeze(self): self.frozen = True def start_new_sequence(self): self.log_probas = [] self.distrib_entropies = [] self._seq_probas = [] def nodes_to_prune(self, treshold): nodes = [] for node, weight in zip(self.var_names, self().squeeze().unbind()): if weight < treshold: nodes.append(node) return nodes def sample_archs(self, batch_size, probas): """ Hook called by pytorch before each forward :param: Current module :param input: Input given to the module's forward :return: """ self._check_probas(probas, self.all_same) if probas.size(0) != batch_size: if probas.size(0) != 1: raise ValueError( "Sampling probabilities dimensions {} doesn't match with batch size {}." .format(probas.size(), batch_size)) if not self.all_same: probas = probas.expand(batch_size, -1) distrib = torch.distributions.Bernoulli(probas) if not self.training and self.deter_eval: samplings = (probas > 0.5).float() else: samplings = distrib.sample() if self.all_same: samplings = samplings.expand(batch_size, -1) self._seq_probas.append(probas) self.distrib_entropies.append(distrib.entropy()) self.log_probas.append(distrib.log_prob(samplings)) return samplings def _check_probas(self, probas, all_same): """ :param probas: B_size*N_nodes Tensor containing the probability of each arch being sampled in the nex forward. :param all_same: if True, the same sampling will be used for the whole batch in the next forward. :return: """ if probas.dim() != 2 or all_same and probas.size(0) != 1: raise ValueError( 'probas params has wrong dimension: {} (all_same={})'. format(probas.size(), all_same)) if probas.size(-1) != self.distrib_dim: raise ValueError( 'Should have exactly as many probas as the number of stochastic nodes({}), got {} instead.' 
.format(self.distrib_dim, probas.size(-1))) @property def last_arch_probas(self): return self.probas @property def last_sequence_probas(self): """ :return: The probabilities of each arch for the last sequence in format (seq_len*batch_size*n_sampling_params) """ return torch.stack(self._seq_probas) class StaticArchGeneratorNew(ArchSampler): def __init__(self, initial_p, *args, **kwargs): super().__init__(*args, **kwargs) self.params = Parameter(torch.Tensor(1, self.distrib_dim)) logit = np.log(initial_p / (1 - initial_p) ) if initial_p < 1 else float('inf') weight_init.constant_(self.params, logit) def entropy(self): distrib = torch.distributions.Bernoulli(self.params.sigmoid()) return distrib.entropy() def remove_var(self, name): assert self.var_names self.distrib_dim -= 1 remove_idx = self.var_names.index(name) self.var_names.remove(name) all_idx = torch.ones_like(self.params).bool() all_idx[0, remove_idx] = 0 self.params = nn.Parameter(self.params[all_idx].unsqueeze(0)) def is_deterministic(self): distrib = self() return torch.equal(distrib, distrib ** 2) def forward(self): primals_1 = self.params output = call([primals_1]) return output[0]
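# Illustrative parity sketch (assumes a CUDA device; not part of the record):
# the generator's forward is a single elementwise sigmoid of its parameter
# tensor, so the compiled path should match the eager computation.
import torch

gen = StaticArchGeneratorNew(0.5, distrib_dim=4, all_same=True,
    deter_eval=True).cuda()
torch.testing.assert_close(gen(), gen.params.sigmoid())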
RaoefTaki/MNTDP-forked
StaticArchGenerator
false
8,700
[ "MIT" ]
15
d9ea59a6638f6cdc93eca180ab02672f5bf5d2a1
https://github.com/RaoefTaki/MNTDP-forked/tree/d9ea59a6638f6cdc93eca180ab02672f5bf5d2a1
PMA
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/hv/chv7m4fc52mdvsvi543asiruw7qww65fwfdxhjiuhvfn5lqumlpq.py # Topologically Sorted Source Nodes: [Q_], Original ATen: [aten.cat] # Source node to ATen node mapping: # Q_ => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %getitem_1, %getitem_2, %getitem_3],), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4*x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 
= tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (1 + (4*((-4) + x0))), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (2 + (4*((-8) + x0))), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tl.load(in_ptr0 + (3 + (4*((-12) + x0))), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + (x0), tmp22, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/tj/ctj2xnoux3cn7w56gxs6hpicdagsiu4zb4dom4od6voujshwgzie.py # Topologically Sorted Source Nodes: [A], Original ATen: [aten._softmax] # Source node to ATen node mapping: # A => div_1, exp, sum_1 # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float("-inf")) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tmp7 = tmp2 - tmp6 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tmp10 / tmp14 tl.store(out_ptr2 + (r1 + (16*x0)), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/fq/cfqwft6dz4fpsvuwioex77z56kqurxctenywfa72old47ny4ciza.py # Topologically Sorted Source Nodes: [attn, O], Original ATen: [aten.cat, aten.add] # Source node to ATen node mapping: # O => add # attn => cat_3 # Graph fragment: # %cat_3 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem_12, %getitem_13, %getitem_14, %getitem_15], -1), kwargs = {}) # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%addmm, %cat_3), kwargs = {}) triton_poi_fused_add_cat_2 = async_compile.triton('triton_poi_fused_add_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_cat_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = x0 tmp2 = tl.full([1], 0, tl.int64) tmp3 = tmp1 >= tmp2 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp1 < tmp4 tmp6 = tl.load(in_ptr0 + (x1), tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp1 >= tmp4 tmp8 = tl.full([1], 2, tl.int64) tmp9 = tmp1 < tmp8 tmp10 = tmp7 & tmp9 tmp11 = tl.load(in_ptr0 + (4 + x1), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp1 >= tmp8 tmp13 = tl.full([1], 3, tl.int64) tmp14 = tmp1 < tmp13 tmp15 = tmp12 & tmp14 tmp16 = tl.load(in_ptr0 + (8 + x1), tmp15 & xmask, 
eviction_policy='evict_last', other=0.0) tmp17 = tmp1 >= tmp13 tmp18 = tl.full([1], 4, tl.int64) tmp19 = tmp1 < tmp18 tmp20 = tl.load(in_ptr0 + (12 + x1), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tl.where(tmp15, tmp16, tmp20) tmp22 = tl.where(tmp10, tmp11, tmp21) tmp23 = tl.where(tmp5, tmp6, tmp22) tmp24 = tmp0 + tmp23 tl.store(in_out_ptr0 + (x2), tmp24, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/vi/cvia7hnsmbaj62zpkiccjajpbfnpgvwttuvei34ggjsqqzpejdng.py # Topologically Sorted Source Nodes: [relu, O_1], Original ATen: [aten.relu, aten.add, aten.threshold_backward] # Source node to ATen node mapping: # O_1 => add_1 # relu => relu # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_10), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %relu), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_relu_threshold_backward_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tmp0 + tmp5 tmp7 = 0.0 tmp8 = tmp5 <= tmp7 tl.store(out_ptr0 + (x2), tmp6, xmask) tl.store(out_ptr1 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, 
primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [Q], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, primals_2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_4 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [K], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, primals_1, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_5 del primals_6 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [V], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, primals_1, reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_7 del primals_8 buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [Q_], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf0, buf3, 16, grid=grid(16), stream=stream0) buf4 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [V_], Original ATen: [aten.cat] triton_poi_fused_cat_0.run(buf2, buf4, 16, grid=grid(16), stream=stream0) buf5 = reinterpret_tensor(buf2, (16, 1), (1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [K_], Original ATen: [aten.cat] triton_poi_fused_cat_0.run(buf1, buf5, 16, grid=grid(16), stream=stream0) buf6 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm] extern_kernels.mm(buf3, reinterpret_tensor(buf5, (1, 16), (0, 1), 0), out=buf6) buf9 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [A], Original ATen: [aten._softmax] triton_per_fused__softmax_1.run(buf6, buf9, 16, 16, grid=grid(16), stream=stream0) del buf6 buf10 = reinterpret_tensor(buf1, (16, 1), (1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm] extern_kernels.mm(buf9, buf4, out=buf10) buf11 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [attn, O], Original ATen: [aten.cat, aten.add] triton_poi_fused_add_cat_2.run(buf11, buf10, 16, grid=grid(16), stream=stream0) buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf11, reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf12) buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf14 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu, O_1], Original ATen: [aten.relu, aten.add, aten.threshold_backward] triton_poi_fused_add_relu_threshold_backward_3.run(buf11, buf12, primals_10, buf13, buf14, 16, grid=grid(16), stream=stream0) del buf12 del primals_10 return (buf13, primals_1, primals_2, buf3, buf9, buf11, buf14, primals_9, reinterpret_tensor(buf4, (1, 16), (1, 1), 0), buf5, 
primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
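A note on the `0.5` literal in `triton_per_fused__softmax_1`: the eager source divides the logits by `math.sqrt(Q.shape[-1])` before the softmax, and with `dim = 4` Inductor folds that `1/sqrt(4)` scale into the kernel as a multiply by `0.5` after the max subtraction. A minimal sketch (not from the source) showing the two forms agree:

```python
import torch

# exp((x - max(x)) * 0.5) / sum(...) equals softmax(0.5 * x): the scale
# commutes with the max-subtraction trick, so 1/sqrt(dim) can be folded in.
x = torch.randn(16)
y = torch.exp((x - x.max()) * 0.5)
assert torch.allclose(y / y.sum(), torch.softmax(0.5 * x, dim=0))
```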
import math import torch import torch.nn.functional as F import torch.nn as nn class MAB(nn.Module): def __init__(self, dim_X, dim_Y, dim, num_heads=4, ln=False, p=None): super().__init__() self.num_heads = num_heads self.fc_q = nn.Linear(dim_X, dim) self.fc_k = nn.Linear(dim_Y, dim) self.fc_v = nn.Linear(dim_Y, dim) self.fc_o = nn.Linear(dim, dim) self.ln1 = nn.LayerNorm(dim) if ln else nn.Identity() self.ln2 = nn.LayerNorm(dim) if ln else nn.Identity() self.dropout1 = nn.Dropout(p=p) if p is not None else nn.Identity() self.dropout2 = nn.Dropout(p=p) if p is not None else nn.Identity() def forward(self, X, Y, mask=None): Q, K, V = self.fc_q(X), self.fc_k(Y), self.fc_v(Y) Q_ = torch.cat(Q.chunk(self.num_heads, -1), 0) K_ = torch.cat(K.chunk(self.num_heads, -1), 0) V_ = torch.cat(V.chunk(self.num_heads, -1), 0) A_logits = Q_ @ K_.transpose(-2, -1) / math.sqrt(Q.shape[-1]) if mask is not None: mask = torch.stack([mask] * Q.shape[-2], -2) mask = torch.cat([mask] * self.num_heads, 0) A_logits.masked_fill_(mask, -float('inf')) A = torch.softmax(A_logits, -1) A.masked_fill_(torch.isnan(A), 0.0) else: A = torch.softmax(A_logits, -1) attn = torch.cat((A @ V_).chunk(self.num_heads, 0), -1) O = self.ln1(Q + self.dropout1(attn)) O = self.ln2(O + self.dropout2(F.relu(self.fc_o(O)))) return O class PMA(nn.Module): def __init__(self, dim_X, dim, num_inds, **kwargs): super().__init__() self.I = nn.Parameter(torch.Tensor(num_inds, dim)) nn.init.xavier_uniform_(self.I) self.mab = MAB(dim, dim_X, dim, **kwargs) def forward(self, X, mask=None): I = self.I if X.dim() == 2 else self.I.repeat(X.shape[0], 1, 1) return self.mab(I, X, mask=mask) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'dim_X': 4, 'dim': 4, 'num_inds': 4}]
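The head split in `MAB.forward` avoids any reshape: it chunks the feature dimension into `num_heads` pieces and stacks them along the batch dimension. The sketch below (not from the source; sizes are illustrative) checks that this is the usual `(heads, n, head_dim)` layout in disguise:

```python
import torch

num_heads, n, dim = 4, 4, 8
Q = torch.randn(n, dim)

# MAB-style: chunk the feature dim, concatenate the chunks along dim 0.
Q_cat = torch.cat(Q.chunk(num_heads, -1), 0)

# Reshape-style: (n, heads, head_dim) -> (heads, n, head_dim) -> flatten heads.
Q_view = Q.view(n, num_heads, dim // num_heads).permute(1, 0, 2).reshape(-1, dim // num_heads)

assert torch.equal(Q_cat, Q_view)
```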
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + 4 * x0, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (1 + 4 * (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (2 + 4 * (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp19 = tl.load(in_ptr0 + (3 + 4 * (-12 + x0)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x0, tmp22, xmask) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float('-inf')) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tmp7 = tmp2 - tmp6 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tmp10 / tmp14 tl.store(out_ptr2 + (r1 + 16 * x0), tmp15, xmask) @triton.jit def triton_poi_fused_add_cat_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = x0 tl.full([1], 0, tl.int64) tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp1 < tmp4 tmp6 = tl.load(in_ptr0 + x1, tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp1 >= tmp4 tmp8 = tl.full([1], 2, tl.int64) tmp9 = tmp1 < tmp8 tmp10 = tmp7 & tmp9 tmp11 = tl.load(in_ptr0 + (4 + x1), tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp12 = tmp1 >= tmp8 tmp13 = tl.full([1], 3, tl.int64) tmp14 = tmp1 < tmp13 tmp15 = tmp12 & tmp14 tmp16 = tl.load(in_ptr0 + (8 + x1), tmp15 & xmask, eviction_policy= 'evict_last', other=0.0) tmp17 = tmp1 >= tmp13 tl.full([1], 4, tl.int64) tmp20 = tl.load(in_ptr0 + (12 + x1), tmp17 & xmask, eviction_policy= 'evict_last', other=0.0) tmp21 = tl.where(tmp15, tmp16, tmp20) tmp22 = tl.where(tmp10, tmp11, tmp21) tmp23 = tl.where(tmp5, tmp6, tmp22) tmp24 = tmp0 + tmp23 tl.store(in_out_ptr0 + x2, tmp24, xmask) @triton.jit def triton_poi_fused_add_relu_threshold_backward_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tmp0 + tmp5 tmp7 = 0.0 tmp8 = tmp5 <= tmp7 tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, primals_2, reinterpret_tensor( primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_4 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, primals_1, reinterpret_tensor( primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_5 del primals_6 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, primals_1, reinterpret_tensor( primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_7 del primals_8 buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(16)](buf0, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((16, 1), (1, 1), torch.float32) triton_poi_fused_cat_0[grid(16)](buf2, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf2, (16, 1), (1, 1), 0) del buf2 triton_poi_fused_cat_0[grid(16)](buf1, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(buf3, reinterpret_tensor(buf5, (1, 16), (0, 1), 0 
), out=buf6) buf9 = empty_strided_cuda((16, 16), (16, 1), torch.float32) triton_per_fused__softmax_1[grid(16)](buf6, buf9, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del buf6 buf10 = reinterpret_tensor(buf1, (16, 1), (1, 1), 0) del buf1 extern_kernels.mm(buf9, buf4, out=buf10) buf11 = buf0 del buf0 triton_poi_fused_add_cat_2[grid(16)](buf11, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0) del buf10 extern_kernels.mm(buf11, reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf12) buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf14 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_3[grid(16)](buf11, buf12, primals_10, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf12 del primals_10 return (buf13, primals_1, primals_2, buf3, buf9, buf11, buf14, primals_9, reinterpret_tensor(buf4, (1, 16), (1, 1), 0), buf5, primals_3) class MAB(nn.Module): def __init__(self, dim_X, dim_Y, dim, num_heads=4, ln=False, p=None): super().__init__() self.num_heads = num_heads self.fc_q = nn.Linear(dim_X, dim) self.fc_k = nn.Linear(dim_Y, dim) self.fc_v = nn.Linear(dim_Y, dim) self.fc_o = nn.Linear(dim, dim) self.ln1 = nn.LayerNorm(dim) if ln else nn.Identity() self.ln2 = nn.LayerNorm(dim) if ln else nn.Identity() self.dropout1 = nn.Dropout(p=p) if p is not None else nn.Identity() self.dropout2 = nn.Dropout(p=p) if p is not None else nn.Identity() def forward(self, X, Y, mask=None): Q, K, V = self.fc_q(X), self.fc_k(Y), self.fc_v(Y) Q_ = torch.cat(Q.chunk(self.num_heads, -1), 0) K_ = torch.cat(K.chunk(self.num_heads, -1), 0) V_ = torch.cat(V.chunk(self.num_heads, -1), 0) A_logits = Q_ @ K_.transpose(-2, -1) / math.sqrt(Q.shape[-1]) if mask is not None: mask = torch.stack([mask] * Q.shape[-2], -2) mask = torch.cat([mask] * self.num_heads, 0) A_logits.masked_fill_(mask, -float('inf')) A = torch.softmax(A_logits, -1) A.masked_fill_(torch.isnan(A), 0.0) else: A = torch.softmax(A_logits, -1) attn = torch.cat((A @ V_).chunk(self.num_heads, 0), -1) O = self.ln1(Q + self.dropout1(attn)) O = self.ln2(O + self.dropout2(F.relu(self.fc_o(O)))) return O class PMANew(nn.Module): def __init__(self, dim_X, dim, num_inds, **kwargs): super().__init__() self.I = nn.Parameter(torch.Tensor(num_inds, dim)) nn.init.xavier_uniform_(self.I) self.mab = MAB(dim, dim_X, dim, **kwargs) def forward(self, input_0): primals_1 = self.I primals_2 = self.mab.fc_q.weight primals_4 = self.mab.fc_q.bias primals_3 = self.mab.fc_k.weight primals_6 = self.mab.fc_k.bias primals_5 = self.mab.fc_v.weight primals_8 = self.mab.fc_v.bias primals_7 = self.mab.fc_o.weight primals_10 = self.mab.fc_o.bias primals_9 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
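Note the buffer recycling in `call`: `buf5 = reinterpret_tensor(buf2, (16, 1), (1, 1), 0)` gives the `(4, 4)` buffer new shape and stride metadata without copying, so `buf2`'s storage is reused instead of allocating fresh memory. A sketch of the same idea using the public `as_strided` (an analogue I'm substituting for the private guard helper):

```python
import torch

buf2 = torch.zeros(4, 4)                 # stands in for a (4, 4) intermediate
buf5 = buf2.as_strided((16, 1), (1, 1))  # same storage, new shape/strides
buf5.fill_(7.0)
assert (buf2 == 7.0).all()               # writes through the alias are visible
```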
OpenXAIProject/dac
PMA
false
8,701
[ "MIT" ]
17
652776e21b56dcb68839363bb077d5c5ea28d81e
https://github.com/OpenXAIProject/dac/tree/652776e21b56dcb68839363bb077d5c5ea28d81e
GlobalAttention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/bx/cbxqbi5x6ergwgugds2mtfqlbo6szwuxtzsbdvjl73lycn3nina2.py # Topologically Sorted Source Nodes: [eq, attn_score_3, attn_score_2, attn_score_4], Original ATen: [aten.eq, aten.masked_fill, aten.mul, aten._softmax] # Source node to ATen node mapping: # attn_score_2 => mul # attn_score_3 => full_default, where # attn_score_4 => amax, exp, sub, sum_1 # eq => eq # Graph fragment: # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg2_1, 0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -9.999999843067494e+17), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %arg2_1), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) triton_poi_fused__softmax_eq_masked_fill_mul_0 = async_compile.triton('triton_poi_fused__softmax_eq_masked_fill_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_eq_masked_fill_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_eq_masked_fill_mul_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp4 = tmp3 * tmp0 tmp5 = -9.999999843067494e+17 tmp6 = tl.where(tmp2, tmp5, tmp4) tmp8 = tmp7 == tmp1 tmp10 = tmp9 * tmp7 tmp11 = tl.where(tmp8, tmp5, tmp10) tmp12 = triton_helpers.maximum(tmp6, tmp11) tmp14 = tmp13 == tmp1 tmp16 = tmp15 * tmp13 tmp17 = tl.where(tmp14, tmp5, tmp16) tmp18 = triton_helpers.maximum(tmp12, tmp17) tmp20 = tmp19 == tmp1 tmp22 = tmp21 * tmp19 tmp23 = tl.where(tmp20, tmp5, tmp22) tmp24 = triton_helpers.maximum(tmp18, tmp23) tmp25 = tmp6 - tmp24 tmp26 = tl_math.exp(tmp25) tmp27 = tmp11 - tmp24 tmp28 = tl_math.exp(tmp27) tmp29 = tmp26 + tmp28 tmp30 = tmp17 - tmp24 tmp31 = tl_math.exp(tmp30) tmp32 = tmp29 + tmp31 tmp33 = tmp23 - tmp24 tmp34 = tl_math.exp(tmp33) tmp35 = tmp32 + tmp34 tl.store(out_ptr0 + (x0), tmp24, xmask) tl.store(out_ptr1 + (x0), tmp35, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/pp/cppjz4dxuvgrncmxnw5kpreuob3rkj7zd6kq3uth6xbedmtz2f3j.py # Topologically Sorted Source Nodes: [eq_1, attn_score_5, eq, attn_score_3, attn_score_2, attn_score_4], Original ATen: [aten.eq, aten.masked_fill, aten.mul, aten._softmax] # Source node to ATen node mapping: # attn_score_2 => mul # attn_score_3 => full_default, where # attn_score_4 => amax, div, exp, sub # attn_score_5 => full_default_1, where_1 # eq => eq # eq_1 => eq_1 # Graph fragment: # %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg2_1, 0), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg2_1, 0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -9.999999843067494e+17), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul 
: [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %arg2_1), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_1, %div), kwargs = {}) triton_poi_fused__softmax_eq_masked_fill_mul_1 = async_compile.triton('triton_poi_fused__softmax_eq_masked_fill_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_eq_masked_fill_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_eq_masked_fill_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_out_ptr0 + (x2), xmask) tmp7 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp4 = tmp3 * tmp0 tmp5 = -9.999999843067494e+17 tmp6 = tl.where(tmp2, tmp5, tmp4) tmp8 = tmp6 - tmp7 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tmp12 = tl.where(tmp2, tmp1, tmp11) tl.store(in_out_ptr0 + (x2), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/qe/cqeeiitmvmc7ui53bm5y7a4byl7ns254nr75puduye6fahglzoyw.py # Topologically Sorted Source Nodes: [mul_1, attn_memory], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # attn_memory => sum_2 # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_1, %arg3_1), kwargs = 
{}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {}) triton_poi_fused_mul_sum_2 = async_compile.triton('triton_poi_fused_mul_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + (x2), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) assert_size_stride(arg3_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_score], Original ATen: [aten.bmm] extern_kernels.bmm(arg0_1, reinterpret_tensor(arg1_1, (4, 4, 1), (4, 1, 1), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [eq, attn_score_3, attn_score_2, attn_score_4], Original ATen: [aten.eq, aten.masked_fill, aten.mul, aten._softmax] stream0 = get_raw_stream(0) 
triton_poi_fused__softmax_eq_masked_fill_mul_0.run(arg2_1, buf0, buf1, buf2, 4, grid=grid(4), stream=stream0) buf3 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [eq_1, attn_score_5, eq, attn_score_3, attn_score_2, attn_score_4], Original ATen: [aten.eq, aten.masked_fill, aten.mul, aten._softmax] triton_poi_fused__softmax_eq_masked_fill_mul_1.run(buf3, arg2_1, buf1, buf2, 16, grid=grid(16), stream=stream0) del arg2_1 del buf1 del buf2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1, attn_memory], Original ATen: [aten.mul, aten.sum] triton_poi_fused_mul_sum_2.run(buf3, arg3_1, buf4, 16, grid=grid(16), stream=stream0) del arg3_1 return (buf3, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
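The kernels above embed `-9.999999843067494e+17` where the Python source writes `-1e+18`: the constant was round-tripped through float32, which cannot represent `-1e18` exactly. A one-line check:

```python
import torch

# Nearest float32 to -1e18, read back as a double: exactly the kernel literal.
assert torch.tensor(-1e18, dtype=torch.float32).item() == -9.999999843067494e+17
```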
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class GlobalAttention(nn.Module): """ Global attention takes a matrix and a query vector. It then computes a parameterized convex combination of the matrix based on the input query. Constructs a unit mapping a query `q` of size `dim` and a source matrix `H` of size `n x dim`, to an output of size `dim`. All models compute the output as :math:`c = sum_{j=1}^{SeqLength} a_j H_j` where :math:`a_j` is the softmax of a score function. However they differ on how they compute the attention score. * Luong Attention (dot, general): * dot: :math:`score(H_j,q) = H_j^T q` * general: :math:`score(H_j, q) = H_j^T W_a q` * Bahdanau Attention (mlp): * :math:`score(H_j, q) = w_a^T tanh(W_a q + U_a h_j)` Args: attn_size (int): dimensionality of query and key attn_type (str): type of attention to use, options [dot,general,mlp] """ def __init__(self, query_size, attn_size, attn_type='dot'): super(GlobalAttention, self).__init__() self.query_size = query_size self.attn_size = attn_size self.attn_type = attn_type if self.attn_type == 'general': self.linear_in = nn.Linear(query_size, attn_size, bias=False) elif self.attn_type == 'mlp': self.linear_query = nn.Linear(query_size, attn_size, bias=True) self.attn_w = nn.Linear(attn_size, 1, bias=False) elif self.attn_type == 'dot': assert self.query_size == self.attn_size def forward(self, query, memory_keys, memory_values, memory_masks): """ Args: query (`FloatTensor`): (batch, query_size) memory_keys (`FloatTensor`): (batch, seq_len, attn_size) memory_values (`FloatTensor`): (batch, seq_len, attn_size) memory_masks (`LongTensor`): (batch, seq_len) Returns: attn_score: attention distributions (batch, seq_len) attn_memory: computed context vector, (batch, attn_size) """ batch_size, seq_len, attn_size = memory_keys.size() if self.attn_type == 'mlp': query_hidden = self.linear_query(query.unsqueeze(1)).expand( batch_size, seq_len, attn_size) attn_hidden = torch.tanh(query_hidden + memory_keys) attn_score = self.attn_w(attn_hidden) elif self.attn_type == 'dot': attn_score = torch.bmm(memory_keys, query.unsqueeze(2)) elif self.attn_type == 'general': query_hidden = self.linear_in(query) attn_score = torch.bmm(memory_keys, query_hidden.unsqueeze(2)) attn_score = attn_score.squeeze(2) if memory_masks is not None: attn_score = attn_score * memory_masks attn_score = attn_score.masked_fill(memory_masks == 0, -1e+18) attn_score = F.softmax(attn_score, dim=1) if memory_masks is not None: attn_score = attn_score.masked_fill(memory_masks == 0, 0) attn_memory = torch.sum(attn_score.unsqueeze(2) * memory_values, 1) return attn_score, attn_memory def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'query_size': 4, 'attn_size': 4}]
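A small eager usage sketch for the default `dot` path, assuming the `GlobalAttention` class above is in scope. Shapes follow the forward docstring; note that `get_inputs` above instead passes a 2-D `memory_values`, which only works by broadcasting. The mask zeroes out the last position of every sequence:

```python
import torch

attn = GlobalAttention(query_size=4, attn_size=4)   # attn_type='dot'
query = torch.rand(4, 4)                            # (batch, query_size)
keys = torch.rand(4, 4, 4)                          # (batch, seq_len, attn_size)
values = torch.rand(4, 4, 4)                        # (batch, seq_len, attn_size)
masks = torch.tensor([[1.0, 1.0, 1.0, 0.0]]).expand(4, 4)

score, context = attn(query, keys, values, masks)
assert score.shape == (4, 4) and context.shape == (4, 4)
assert torch.all(score[:, -1] == 0)                  # padded position zeroed exactly
assert torch.allclose(score.sum(-1), torch.ones(4))  # rows still sum to one
```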
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_eq_masked_fill_mul_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp4 = tmp3 * tmp0 tmp5 = -9.999999843067494e+17 tmp6 = tl.where(tmp2, tmp5, tmp4) tmp8 = tmp7 == tmp1 tmp10 = tmp9 * tmp7 tmp11 = tl.where(tmp8, tmp5, tmp10) tmp12 = triton_helpers.maximum(tmp6, tmp11) tmp14 = tmp13 == tmp1 tmp16 = tmp15 * tmp13 tmp17 = tl.where(tmp14, tmp5, tmp16) tmp18 = triton_helpers.maximum(tmp12, tmp17) tmp20 = tmp19 == tmp1 tmp22 = tmp21 * tmp19 tmp23 = tl.where(tmp20, tmp5, tmp22) tmp24 = triton_helpers.maximum(tmp18, tmp23) tmp25 = tmp6 - tmp24 tmp26 = tl_math.exp(tmp25) tmp27 = tmp11 - tmp24 tmp28 = tl_math.exp(tmp27) tmp29 = tmp26 + tmp28 tmp30 = tmp17 - tmp24 tmp31 = tl_math.exp(tmp30) tmp32 = tmp29 + tmp31 tmp33 = tmp23 - tmp24 tmp34 = tl_math.exp(tmp33) tmp35 = tmp32 + tmp34 tl.store(out_ptr0 + x0, tmp24, xmask) tl.store(out_ptr1 + x0, tmp35, xmask) @triton.jit def triton_poi_fused__softmax_eq_masked_fill_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_out_ptr0 + x2, xmask) tmp7 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp4 = tmp3 * tmp0 tmp5 = -9.999999843067494e+17 tmp6 = tl.where(tmp2, tmp5, tmp4) tmp8 = tmp6 - tmp7 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tmp12 = tl.where(tmp2, tmp1, tmp11) tl.store(in_out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + x0), xmask, 
eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) assert_size_stride(arg3_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(arg0_1, reinterpret_tensor(arg1_1, (4, 4, 1), (4, 1, 1), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_eq_masked_fill_mul_0[grid(4)](arg2_1, buf0, buf1, buf2, 4, XBLOCK=4, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0) del buf0 triton_poi_fused__softmax_eq_masked_fill_mul_1[grid(16)](buf3, arg2_1, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg2_1 del buf1 del buf2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_sum_2[grid(16)](buf3, arg3_1, buf4, 16, XBLOCK =16, num_warps=1, num_stages=1) del arg3_1 return buf3, buf4 class GlobalAttentionNew(nn.Module): """ Global attention takes a matrix and a query vector. It then computes a parameterized convex combination of the matrix based on the input query. Constructs a unit mapping a query `q` of size `dim` and a source matrix `H` of size `n x dim`, to an output of size `dim`. All models compute the output as :math:`c = sum_{j=1}^{SeqLength} a_j H_j` where :math:`a_j` is the softmax of a score function. However they differ on how they compute the attention score. * Luong Attention (dot, general): * dot: :math:`score(H_j,q) = H_j^T q` * general: :math:`score(H_j, q) = H_j^T W_a q` * Bahdanau Attention (mlp): * :math:`score(H_j, q) = w_a^T tanh(W_a q + U_a h_j)` Args: attn_size (int): dimensionality of query and key attn_type (str): type of attention to use, options [dot,general,mlp] """ def __init__(self, query_size, attn_size, attn_type='dot'): super(GlobalAttentionNew, self).__init__() self.query_size = query_size self.attn_size = attn_size self.attn_type = attn_type if self.attn_type == 'general': self.linear_in = nn.Linear(query_size, attn_size, bias=False) elif self.attn_type == 'mlp': self.linear_query = nn.Linear(query_size, attn_size, bias=True) self.attn_w = nn.Linear(attn_size, 1, bias=False) elif self.attn_type == 'dot': assert self.query_size == self.attn_size def forward(self, input_0, input_1, input_2, input_3): arg1_1 = input_0 arg0_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0], output[1]
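Spelled out eagerly, the two fused kernels implement a two-pass masked softmax: the first pass computes each row's max and exp-sum over the masked scores; the second normalizes and re-zeroes the padded positions in place. A reference sketch (function and variable names are mine, not from the kernels):

```python
import torch

NEG = -9.999999843067494e+17  # float32(-1e18), as embedded in the kernels

def masked_softmax_two_pass(score, mask):
    filled = torch.where(mask == 0, torch.full_like(score, NEG), score * mask)
    row_max = filled.amax(dim=1, keepdim=True)        # pass 1: per-row max
    num = torch.exp(filled - row_max)                 # pass 1: exp for the sum
    out = num / num.sum(dim=1, keepdim=True)          # pass 2: normalize
    return torch.where(mask == 0, torch.zeros_like(out), out)  # pass 2: re-zero

score, mask = torch.rand(4, 4), (torch.rand(4, 4) > 0.3).float()
eager = (score * mask).masked_fill(mask == 0, -1e18).softmax(1).masked_fill(mask == 0, 0)
assert torch.allclose(masked_softmax_two_pass(score, mask), eager)
```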
Roc-Ng/HANet
GlobalAttention
false
8,702
[ "MIT" ]
34
e679703e9e725205424d87f750358fb4f62ceec5
https://github.com/Roc-Ng/HANet/tree/e679703e9e725205424d87f750358fb4f62ceec5
ScoreLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/eg/cegl5vqhcbzcqrandmb7ig5xykm3zgwjf7eyt266lmyvluezoop6.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = 
tmp0 + tmp2 tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0) del primals_2 return (buf1, primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch.nn import functional as F import torch.nn as nn class ScoreLayer(nn.Module): def __init__(self, k): super(ScoreLayer, self).__init__() self.score = nn.Conv2d(k, 1, 1, 1) def forward(self, x, x_size=None): x = self.score(x) if x_size is not None: x = F.interpolate(x, x_size[2:], mode='bilinear', align_corners=True) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'k': 4}]
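A short usage sketch for the optional upsampling path (sizes are hypothetical; only `x_size[2:]` is read), assuming the `ScoreLayer` class above is in scope:

```python
import torch

layer = ScoreLayer(k=4)
feat = torch.rand(2, 4, 8, 8)             # (N, k, H, W) feature map
out = layer(feat, x_size=(2, 3, 32, 32))  # 1x1 conv, then bilinear upsample
assert out.shape == (2, 1, 32, 32)
```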
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class ScoreLayerNew(nn.Module): def __init__(self, k): super(ScoreLayerNew, self).__init__() self.score = nn.Conv2d(k, 1, 1, 1) def forward(self, input_0): primals_1 = self.score.weight primals_2 = self.score.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
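The generated code splits the layer into a bias-less convolution (the extern kernel) and a tiny pointwise kernel that broadcasts the scalar bias and adds it in place. A reference sketch of the same computation in eager PyTorch:

```python
import torch
import torch.nn.functional as F

def score_reference(x, weight, bias):
    y = F.conv2d(x, weight, bias=None)  # extern_kernels.convolution with bias=None
    y += bias.view(1, -1, 1, 1)         # triton_poi_fused_convolution_0: in-place add
    return y

x, w, b = torch.rand(4, 4, 4, 4), torch.rand(1, 4, 1, 1), torch.rand(1)
assert torch.allclose(score_reference(x, w, b), F.conv2d(x, w, b))
```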
Res2Net/Res2Net-PoolNet
ScoreLayer
false
8,703
[ "MIT" ]
35
7bef0652e83a6c4ebe4ed47f1b03ab5b7b16074a
https://github.com/Res2Net/Res2Net-PoolNet/tree/7bef0652e83a6c4ebe4ed47f1b03ab5b7b16074a
ISAB
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/hv/chv7m4fc52mdvsvi543asiruw7qww65fwfdxhjiuhvfn5lqumlpq.py # Topologically Sorted Source Nodes: [Q_], Original ATen: [aten.cat] # Source node to ATen node mapping: # Q_ => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %getitem_1, %getitem_2, %getitem_3],), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4*x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 
= tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (1 + (4*((-4) + x0))), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (2 + (4*((-8) + x0))), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tl.load(in_ptr0 + (3 + (4*((-12) + x0))), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + (x0), tmp22, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/tj/ctj2xnoux3cn7w56gxs6hpicdagsiu4zb4dom4od6voujshwgzie.py # Topologically Sorted Source Nodes: [A], Original ATen: [aten._softmax] # Source node to ATen node mapping: # A => div_1, exp, sum_1 # Graph fragment: # %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, 1), kwargs = {}) # %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [-1], True), kwargs = {}) # %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {}) # %div_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 2.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float("-inf")) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tmp7 = tmp2 - tmp6 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tmp10 / tmp14 tl.store(out_ptr2 + (r1 + (16*x0)), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/fq/cfqwft6dz4fpsvuwioex77z56kqurxctenywfa72old47ny4ciza.py # Topologically Sorted Source Nodes: [attn, O], Original ATen: [aten.cat, aten.add] # Source node to ATen node mapping: # O => add # attn => cat_3 # Graph fragment: # %cat_3 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem_12, %getitem_13, %getitem_14, %getitem_15], -1), kwargs = {}) # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%addmm, %cat_3), kwargs = {}) triton_poi_fused_add_cat_2 = async_compile.triton('triton_poi_fused_add_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_cat_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = x0 tmp2 = tl.full([1], 0, tl.int64) tmp3 = tmp1 >= tmp2 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp1 < tmp4 tmp6 = tl.load(in_ptr0 + (x1), tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp1 >= tmp4 tmp8 = tl.full([1], 2, tl.int64) tmp9 = tmp1 < tmp8 tmp10 = tmp7 & tmp9 tmp11 = tl.load(in_ptr0 + (4 + x1), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp1 >= tmp8 tmp13 = tl.full([1], 3, tl.int64) tmp14 = tmp1 < tmp13 tmp15 = tmp12 & tmp14 tmp16 = tl.load(in_ptr0 + (8 + x1), tmp15 & xmask, 
eviction_policy='evict_last', other=0.0) tmp17 = tmp1 >= tmp13 tmp18 = tl.full([1], 4, tl.int64) tmp19 = tmp1 < tmp18 tmp20 = tl.load(in_ptr0 + (12 + x1), tmp17 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = tl.where(tmp15, tmp16, tmp20) tmp22 = tl.where(tmp10, tmp11, tmp21) tmp23 = tl.where(tmp5, tmp6, tmp22) tmp24 = tmp0 + tmp23 tl.store(in_out_ptr0 + (x2), tmp24, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/vi/cvia7hnsmbaj62zpkiccjajpbfnpgvwttuvei34ggjsqqzpejdng.py # Topologically Sorted Source Nodes: [relu, O_1], Original ATen: [aten.relu, aten.add, aten.threshold_backward] # Source node to ATen node mapping: # O_1 => add_1 # relu => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_10), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) # %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %relu), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_relu_threshold_backward_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tmp0 + tmp5 tmp7 = 0.0 tmp8 = tmp5 <= tmp7 tl.store(out_ptr0 + (x2), tmp6, xmask) tl.store(out_ptr1 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, 
primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4, ), (1, )) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4, ), (1, )) assert_size_stride(primals_17, (4, 4), (4, 1)) assert_size_stride(primals_18, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [Q], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, primals_2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_4 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [K], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, primals_1, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_5 del primals_6 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [V], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, primals_1, reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_7 del primals_8 buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [Q_], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf0, buf3, 16, grid=grid(16), stream=stream0) buf4 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [V_], Original ATen: [aten.cat] triton_poi_fused_cat_0.run(buf2, buf4, 16, grid=grid(16), stream=stream0) buf5 = reinterpret_tensor(buf2, (16, 1), (1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [K_], Original ATen: [aten.cat] triton_poi_fused_cat_0.run(buf1, buf5, 16, grid=grid(16), stream=stream0) buf6 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm] extern_kernels.mm(buf3, reinterpret_tensor(buf5, (1, 16), (0, 1), 0), out=buf6) buf9 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [A], Original ATen: [aten._softmax] triton_per_fused__softmax_1.run(buf6, buf9, 16, 16, grid=grid(16), stream=stream0) buf10 = reinterpret_tensor(buf1, (16, 1), (1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm] extern_kernels.mm(buf9, buf4, out=buf10) buf11 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [attn, O], Original ATen: [aten.cat, aten.add] triton_poi_fused_add_cat_2.run(buf11, buf10, 16, grid=grid(16), stream=stream0) buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf11, reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf12) buf13 = empty_strided_cuda((4, 4), (4, 1), 
torch.float32) buf29 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu, O_1], Original ATen: [aten.relu, aten.add, aten.threshold_backward] triton_poi_fused_add_relu_threshold_backward_3.run(buf11, buf12, primals_10, buf13, buf29, 16, grid=grid(16), stream=stream0) del primals_10 buf14 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [Q_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_12, primals_1, reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf14) del primals_11 del primals_12 buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [K_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_14, buf13, reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15) del primals_14 buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [V_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_16, buf13, reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_16 buf17 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [Q__1], Original ATen: [aten.cat] triton_poi_fused_cat_0.run(buf14, buf17, 16, grid=grid(16), stream=stream0) buf18 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [V__1], Original ATen: [aten.cat] triton_poi_fused_cat_0.run(buf16, buf18, 16, grid=grid(16), stream=stream0) buf19 = reinterpret_tensor(buf16, (16, 1), (1, 1), 0); del buf16 # reuse # Topologically Sorted Source Nodes: [K__1], Original ATen: [aten.cat] triton_poi_fused_cat_0.run(buf15, buf19, 16, grid=grid(16), stream=stream0) buf20 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.mm] extern_kernels.mm(buf17, reinterpret_tensor(buf19, (1, 16), (0, 1), 0), out=buf20) buf23 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [A_1], Original ATen: [aten._softmax] triton_per_fused__softmax_1.run(buf20, buf23, 16, 16, grid=grid(16), stream=stream0) del buf20 buf24 = reinterpret_tensor(buf15, (16, 1), (1, 1), 0); del buf15 # reuse # Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.mm] extern_kernels.mm(buf23, buf18, out=buf24) buf25 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [attn_1, O_2], Original ATen: [aten.cat, aten.add] triton_poi_fused_add_cat_2.run(buf25, buf24, 16, grid=grid(16), stream=stream0) buf26 = reinterpret_tensor(buf24, (4, 4), (4, 1), 0); del buf24 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf25, reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf26) buf27 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf28 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu_1, O_3], Original ATen: [aten.relu, aten.add, aten.threshold_backward] triton_poi_fused_add_relu_threshold_backward_3.run(buf25, buf26, primals_18, buf27, buf28, 16, grid=grid(16), stream=stream0) del buf26 del primals_18 return (buf27, primals_1, primals_2, buf3, buf9, buf11, buf13, buf17, buf23, buf25, buf28, primals_17, reinterpret_tensor(buf18, (1, 16), (1, 1), 0), buf19, primals_15, primals_13, buf29, primals_9, reinterpret_tensor(buf4, (1, 16), (1, 1), 0), buf5, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import 
print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn.functional as F import torch.nn as nn class MAB(nn.Module): def __init__(self, dim_X, dim_Y, dim, num_heads=4, ln=False, p=None): super().__init__() self.num_heads = num_heads self.fc_q = nn.Linear(dim_X, dim) self.fc_k = nn.Linear(dim_Y, dim) self.fc_v = nn.Linear(dim_Y, dim) self.fc_o = nn.Linear(dim, dim) self.ln1 = nn.LayerNorm(dim) if ln else nn.Identity() self.ln2 = nn.LayerNorm(dim) if ln else nn.Identity() self.dropout1 = nn.Dropout(p=p) if p is not None else nn.Identity() self.dropout2 = nn.Dropout(p=p) if p is not None else nn.Identity() def forward(self, X, Y, mask=None): Q, K, V = self.fc_q(X), self.fc_k(Y), self.fc_v(Y) Q_ = torch.cat(Q.chunk(self.num_heads, -1), 0) K_ = torch.cat(K.chunk(self.num_heads, -1), 0) V_ = torch.cat(V.chunk(self.num_heads, -1), 0) A_logits = Q_ @ K_.transpose(-2, -1) / math.sqrt(Q.shape[-1]) if mask is not None: mask = torch.stack([mask] * Q.shape[-2], -2) mask = torch.cat([mask] * self.num_heads, 0) A_logits.masked_fill_(mask, -float('inf')) A = torch.softmax(A_logits, -1) A.masked_fill_(torch.isnan(A), 0.0) else: A = torch.softmax(A_logits, -1) attn = torch.cat((A @ V_).chunk(self.num_heads, 0), -1) O = self.ln1(Q + self.dropout1(attn)) O = self.ln2(O + self.dropout2(F.relu(self.fc_o(O)))) return O class PMA(nn.Module): def __init__(self, dim_X, dim, num_inds, **kwargs): super().__init__() self.I = nn.Parameter(torch.Tensor(num_inds, dim)) nn.init.xavier_uniform_(self.I) self.mab = MAB(dim, dim_X, dim, **kwargs) def forward(self, X, mask=None): I = self.I if X.dim() == 2 else self.I.repeat(X.shape[0], 1, 1) return self.mab(I, X, mask=mask) class ISAB(nn.Module): def __init__(self, dim_X, dim, num_inds, **kwargs): super().__init__() self.pma = PMA(dim_X, dim, num_inds, **kwargs) self.mab = MAB(dim_X, dim, dim, **kwargs) def forward(self, X, mask=None): return self.mab(X, self.pma(X, mask=mask)) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'dim_X': 4, 'dim': 4, 'num_inds': 4}]
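# --- Hedged usage sketch (editorial addition, not part of the upstream repo):
# exercises the eager ISAB reference above with the shapes advertised by
# get_inputs()/get_init_inputs(). The variable names below are illustrative
# assumptions; the MAB/PMA/ISAB classes come from the blob directly above.
import torch

isab = ISAB(dim_X=4, dim=4, num_inds=4)   # matches get_init_inputs()
X = torch.rand(4, 4)                      # one set of 4 elements, 4 features each
out = isab(X)                             # MAB(X, PMA(X)): attention via 4 inducing points
print(out.shape)                          # torch.Size([4, 4])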
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + 4 * x0, tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (1 + 4 * (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (2 + 4 * (-8 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp19 = tl.load(in_ptr0 + (3 + 4 * (-12 + x0)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tl.store(out_ptr0 + x0, tmp22, xmask) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float('-inf')) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tmp7 = tmp2 - tmp6 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tmp10 / tmp14 tl.store(out_ptr2 + (r1 + 16 * x0), tmp15, xmask) @triton.jit def triton_poi_fused_add_cat_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = x0 tl.full([1], 0, tl.int64) tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp1 < tmp4 tmp6 = tl.load(in_ptr0 + x1, tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp1 >= tmp4 tmp8 = tl.full([1], 2, tl.int64) tmp9 = tmp1 < tmp8 tmp10 = tmp7 & tmp9 tmp11 = tl.load(in_ptr0 + (4 + x1), tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp12 = tmp1 >= tmp8 tmp13 = tl.full([1], 3, tl.int64) tmp14 = tmp1 < tmp13 tmp15 = tmp12 & tmp14 tmp16 = tl.load(in_ptr0 + (8 + x1), tmp15 & xmask, eviction_policy= 'evict_last', other=0.0) tmp17 = tmp1 >= tmp13 tl.full([1], 4, tl.int64) tmp20 = tl.load(in_ptr0 + (12 + x1), tmp17 & xmask, eviction_policy= 'evict_last', other=0.0) tmp21 = tl.where(tmp15, tmp16, tmp20) tmp22 = tl.where(tmp10, tmp11, tmp21) tmp23 = tl.where(tmp5, tmp6, tmp22) tmp24 = tmp0 + tmp23 tl.store(in_out_ptr0 + x2, tmp24, xmask) @triton.jit def triton_poi_fused_add_relu_threshold_backward_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tmp0 + tmp5 tmp7 = 0.0 tmp8 = tmp5 <= tmp7 tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 ) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4, 4), (4, 1)) assert_size_stride(primals_18, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, primals_2, reinterpret_tensor( primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_4 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, primals_1, reinterpret_tensor( primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_5 del primals_6 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, primals_1, reinterpret_tensor( primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_7 del primals_8 buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(16)](buf0, buf3, 16, 
XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((16, 1), (1, 1), torch.float32) triton_poi_fused_cat_0[grid(16)](buf2, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf2, (16, 1), (1, 1), 0) del buf2 triton_poi_fused_cat_0[grid(16)](buf1, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(buf3, reinterpret_tensor(buf5, (1, 16), (0, 1), 0 ), out=buf6) buf9 = empty_strided_cuda((16, 16), (16, 1), torch.float32) triton_per_fused__softmax_1[grid(16)](buf6, buf9, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf10 = reinterpret_tensor(buf1, (16, 1), (1, 1), 0) del buf1 extern_kernels.mm(buf9, buf4, out=buf10) buf11 = buf0 del buf0 triton_poi_fused_add_cat_2[grid(16)](buf11, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0) del buf10 extern_kernels.mm(buf11, reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf12) buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf29 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_3[grid(16)](buf11, buf12, primals_10, buf13, buf29, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_10 buf14 = buf12 del buf12 extern_kernels.addmm(primals_12, primals_1, reinterpret_tensor( primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf14) del primals_11 del primals_12 buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_14, buf13, reinterpret_tensor( primals_13, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15) del primals_14 buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_16, buf13, reinterpret_tensor( primals_15, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_16 buf17 = empty_strided_cuda((16, 1), (1, 1), torch.float32) triton_poi_fused_cat_0[grid(16)](buf14, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1) buf18 = empty_strided_cuda((16, 1), (1, 1), torch.float32) triton_poi_fused_cat_0[grid(16)](buf16, buf18, 16, XBLOCK=16, num_warps=1, num_stages=1) buf19 = reinterpret_tensor(buf16, (16, 1), (1, 1), 0) del buf16 triton_poi_fused_cat_0[grid(16)](buf15, buf19, 16, XBLOCK=16, num_warps=1, num_stages=1) buf20 = buf6 del buf6 extern_kernels.mm(buf17, reinterpret_tensor(buf19, (1, 16), (0, 1), 0), out=buf20) buf23 = empty_strided_cuda((16, 16), (16, 1), torch.float32) triton_per_fused__softmax_1[grid(16)](buf20, buf23, 16, 16, XBLOCK= 8, num_warps=2, num_stages=1) del buf20 buf24 = reinterpret_tensor(buf15, (16, 1), (1, 1), 0) del buf15 extern_kernels.mm(buf23, buf18, out=buf24) buf25 = buf14 del buf14 triton_poi_fused_add_cat_2[grid(16)](buf25, buf24, 16, XBLOCK=16, num_warps=1, num_stages=1) buf26 = reinterpret_tensor(buf24, (4, 4), (4, 1), 0) del buf24 extern_kernels.mm(buf25, reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf26) buf27 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf28 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_3[grid(16)](buf25, buf26, primals_18, buf27, buf28, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf26 del primals_18 return (buf27, primals_1, primals_2, buf3, buf9, buf11, buf13, buf17, buf23, buf25, buf28, primals_17, reinterpret_tensor(buf18, (1, 16), (1, 1), 0), buf19, primals_15, primals_13, buf29, primals_9, reinterpret_tensor(buf4, (1, 16), (1, 1), 0), buf5, primals_3) class MAB(nn.Module): def __init__(self, dim_X, dim_Y, dim, num_heads=4, ln=False, p=None): 
super().__init__() self.num_heads = num_heads self.fc_q = nn.Linear(dim_X, dim) self.fc_k = nn.Linear(dim_Y, dim) self.fc_v = nn.Linear(dim_Y, dim) self.fc_o = nn.Linear(dim, dim) self.ln1 = nn.LayerNorm(dim) if ln else nn.Identity() self.ln2 = nn.LayerNorm(dim) if ln else nn.Identity() self.dropout1 = nn.Dropout(p=p) if p is not None else nn.Identity() self.dropout2 = nn.Dropout(p=p) if p is not None else nn.Identity() def forward(self, X, Y, mask=None): Q, K, V = self.fc_q(X), self.fc_k(Y), self.fc_v(Y) Q_ = torch.cat(Q.chunk(self.num_heads, -1), 0) K_ = torch.cat(K.chunk(self.num_heads, -1), 0) V_ = torch.cat(V.chunk(self.num_heads, -1), 0) A_logits = Q_ @ K_.transpose(-2, -1) / math.sqrt(Q.shape[-1]) if mask is not None: mask = torch.stack([mask] * Q.shape[-2], -2) mask = torch.cat([mask] * self.num_heads, 0) A_logits.masked_fill_(mask, -float('inf')) A = torch.softmax(A_logits, -1) A.masked_fill_(torch.isnan(A), 0.0) else: A = torch.softmax(A_logits, -1) attn = torch.cat((A @ V_).chunk(self.num_heads, 0), -1) O = self.ln1(Q + self.dropout1(attn)) O = self.ln2(O + self.dropout2(F.relu(self.fc_o(O)))) return O class PMA(nn.Module): def __init__(self, dim_X, dim, num_inds, **kwargs): super().__init__() self.I = nn.Parameter(torch.Tensor(num_inds, dim)) nn.init.xavier_uniform_(self.I) self.mab = MAB(dim, dim_X, dim, **kwargs) def forward(self, X, mask=None): I = self.I if X.dim() == 2 else self.I.repeat(X.shape[0], 1, 1) return self.mab(I, X, mask=mask) class ISABNew(nn.Module): def __init__(self, dim_X, dim, num_inds, **kwargs): super().__init__() self.pma = PMA(dim_X, dim, num_inds, **kwargs) self.mab = MAB(dim_X, dim, dim, **kwargs) def forward(self, input_0): primals_1 = self.pma.I primals_2 = self.pma.mab.fc_q.weight primals_4 = self.pma.mab.fc_q.bias primals_3 = self.pma.mab.fc_k.weight primals_6 = self.pma.mab.fc_k.bias primals_5 = self.pma.mab.fc_v.weight primals_8 = self.pma.mab.fc_v.bias primals_7 = self.pma.mab.fc_o.weight primals_10 = self.pma.mab.fc_o.bias primals_9 = self.mab.fc_q.weight primals_12 = self.mab.fc_q.bias primals_11 = self.mab.fc_k.weight primals_14 = self.mab.fc_k.bias primals_13 = self.mab.fc_v.weight primals_16 = self.mab.fc_v.bias primals_15 = self.mab.fc_o.weight primals_18 = self.mab.fc_o.bias primals_17 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return output[0]
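# --- Hedged smoke test (editorial addition): the compiled ISABNew above routes
# forward() through call(), which pins every buffer to cuda:0 and asserts
# (4, 4) input strides, so this sketch assumes a CUDA device. It only checks
# that the fused path runs and keeps the output shape; it does not assert
# numerical parity with the eager ISAB.
import torch

if torch.cuda.is_available():
    fused = ISABNew(dim_X=4, dim=4, num_inds=4).cuda()
    X = torch.rand(4, 4, device='cuda')
    out = fused(X)
    assert out.shape == (4, 4)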
OpenXAIProject/dac
ISAB
false
8704
[ "MIT" ]
17
652776e21b56dcb68839363bb077d5c5ea28d81e
https://github.com/OpenXAIProject/dac/tree/652776e21b56dcb68839363bb077d5c5ea28d81e
ExponentialDecay
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/46/c46xb7vmp6upiptnf2nzsamtfqzcr6ve7jp6xugquejgk4ihbzb5.py # Topologically Sorted Source Nodes: [mul, mul_1, state_1, mul_2, mul_3, state_2, mul_4, mul_5, state_3, mul_6, mul_7, state_4], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # mul_6 => mul_6 # mul_7 => mul_7 # state_1 => add # state_2 => add_1 # state_3 => add_2 # state_4 => add_3 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, -3.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, 4.0), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_4, -3.0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 4.0), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_8, -3.0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 4.0), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_5), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_12, -3.0), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 4.0), kwargs = {}) # %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %mul_7), kwargs = {}) triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, 
TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (192 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (64 + x0), xmask) tmp7 = tl.load(in_ptr0 + (x0), xmask) tmp1 = -3.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = 4.0 tmp10 = tmp7 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = tmp11 * tmp9 tmp13 = tmp6 + tmp12 tmp14 = tmp13 * tmp9 tmp15 = tmp4 + tmp14 tmp16 = tmp15 * tmp9 tmp17 = tmp2 + tmp16 tl.store(out_ptr0 + (x0), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/bh/cbh25okt56l476nhw6rw2nxfx3u3s2joy6rljl3uwg6d724c3vqp.py # Topologically Sorted Source Nodes: [mul, mul_1, state_1, mul_2, mul_3, state_2, mul_4, mul_5, state_3], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # state_1 => add # state_2 => add_1 # state_3 => add_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, -3.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, 4.0), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %select_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%permute, %add, 0, 0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_4, -3.0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 4.0), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %select_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default, %add_1, 0, 1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_8, -3.0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 4.0), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_5), kwargs = 
{}) # %select_scatter_default_2 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %add_2, 0, 2), kwargs = {}) # %select_scatter_default_3 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_2, %add_3, 0, 3), kwargs = {}) triton_poi_fused_add_mul_1 = async_compile.triton('triton_poi_fused_add_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 64) x0 = xindex % 64 x2 = xindex tmp3 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (128 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (64 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (x2), xmask) tmp0 = x1 tmp1 = tl.full([1], 3, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = tl.full([1], 2, tl.int32) tmp5 = tmp0 == tmp4 tmp7 = -3.0 tmp8 = tmp6 * tmp7 tmp10 = tmp9 * tmp7 tmp12 = tmp11 * tmp7 tmp13 = 4.0 tmp14 = tmp11 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp15 * tmp13 tmp17 = tmp10 + tmp16 tmp18 = tmp17 * tmp13 tmp19 = tmp8 + tmp18 tmp20 = tl.full([1], 1, tl.int32) tmp21 = tmp0 == tmp20 tmp22 = tl.full([1], 0, tl.int32) tmp23 = tmp0 == tmp22 tmp25 = tl.where(tmp23, tmp15, tmp24) tmp26 = tl.where(tmp21, tmp17, tmp25) tmp27 = tl.where(tmp5, tmp19, tmp26) tmp28 = tl.where(tmp2, tmp3, tmp27) tl.store(out_ptr0 + (x2), tmp28, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, mul_1, state_1, mul_2, mul_3, state_2, mul_4, mul_5, state_3, mul_6, mul_7, 
state_4], Original ATen: [aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_0.run(arg0_1, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, mul_1, state_1, mul_2, mul_3, state_2, mul_4, mul_5, state_3], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_1.run(buf1, arg0_1, buf0, buf2, 256, grid=grid(256), stream=stream0) del arg0_1 del buf0 return (buf2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import Tensor
from torch import nn
from torch.jit import Final
from typing import Optional


class ExponentialUpdate(nn.Module):
    alpha: 'Final[float]'

    def __init__(self, alpha: 'float'):
        super().__init__()
        self.alpha = float(alpha)

    def forward(self, x: 'Tensor', state: 'Tensor') ->Tensor:
        # EMA-style recurrence step: state <- x*(1 - alpha) + state*alpha
        return x * (1 - self.alpha) + state * self.alpha


class ExponentialDecay(nn.Module):

    def __init__(self, alpha: 'float'):
        super().__init__()
        self.update_rule = ExponentialUpdate(alpha)

    def forward(self, x: 'Tensor', state: 'Optional[Tensor]'=None):
        out = torch.empty_like(x)
        if state is None:
            state = x[0]
        for t in range(x.shape[0]):
            state = self.update_rule(x[t], state)
            out[t] = state
        return out, state


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'alpha': 4}]
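# --- Hedged usage sketch (editorial addition): running the eager
# ExponentialDecay above. get_init_inputs() passes alpha=4, which is exactly
# why the compiled kernels in this record bake in the constants
# -3.0 (= 1 - alpha) and 4.0 (= alpha). Variable names are illustrative.
import torch

decay = ExponentialDecay(alpha=4)
x = torch.rand(4, 4, 4, 4)              # time-major input: x[t] is one step
out, state = decay(x)                   # out[t] = x[t]*(1 - 4) + state*4
assert torch.equal(state, out[-1])      # the returned state is the last step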
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import Tensor from torch import nn from torch.jit import Final assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (192 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (64 + x0), xmask) tmp7 = tl.load(in_ptr0 + x0, xmask) tmp1 = -3.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = 4.0 tmp10 = tmp7 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = tmp11 * tmp9 tmp13 = tmp6 + tmp12 tmp14 = tmp13 * tmp9 tmp15 = tmp4 + tmp14 tmp16 = tmp15 * tmp9 tmp17 = tmp2 + tmp16 tl.store(out_ptr0 + x0, tmp17, xmask) @triton.jit def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 64 x0 = xindex % 64 x2 = xindex tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (128 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (64 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + x2, xmask) tmp0 = x1 tmp1 = tl.full([1], 3, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = tl.full([1], 2, tl.int32) tmp5 = tmp0 == tmp4 tmp7 = -3.0 tmp8 = tmp6 * tmp7 tmp10 = tmp9 * tmp7 tmp12 = tmp11 * tmp7 tmp13 = 4.0 tmp14 = tmp11 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp15 * tmp13 tmp17 = tmp10 + tmp16 tmp18 = tmp17 * tmp13 tmp19 = tmp8 + tmp18 tmp20 = tl.full([1], 1, tl.int32) tmp21 = tmp0 == tmp20 tmp22 = tl.full([1], 0, tl.int32) tmp23 = tmp0 == tmp22 tmp25 = tl.where(tmp23, tmp15, tmp24) tmp26 = tl.where(tmp21, tmp17, tmp25) tmp27 = tl.where(tmp5, tmp19, tmp26) tmp28 = tl.where(tmp2, tmp3, tmp27) tl.store(out_ptr0 + x2, tmp28, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(64)](arg0_1, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_1[grid(256)](buf1, arg0_1, buf0, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del buf0 return buf2, buf1 class ExponentialUpdate(nn.Module): alpha: 'Final[int]' def __init__(self, alpha: 'float'): super().__init__() self.alpha = float(alpha) def forward(self, x: 'Tensor', state: 'Tensor') ->Tensor: return x * (1 - self.alpha) + state * self.alpha class ExponentialDecayNew(nn.Module): def __init__(self, alpha: 'float'): super().__init__() self.update_rule = ExponentialUpdate(alpha) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0], output[1]
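# --- Hedged parity sketch (editorial addition): the fused kernels above
# hard-code alpha=4, so agreement with the eager module should only be
# expected for that value; a CUDA device is assumed because call()
# allocates on cuda:0.
import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager_out, eager_state = ExponentialDecay(alpha=4)(x)
    fused_out, fused_state = ExponentialDecayNew(alpha=4)(x)
    torch.testing.assert_close(fused_out, eager_out)
    torch.testing.assert_close(fused_state, eager_state)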
Rikorose/clc-dns-challenge-2020
ExponentialDecay
false
8705
[ "Apache-2.0" ]
12
4f1c078691327a75b3a338fe372ba356b450a6da
https://github.com/Rikorose/clc-dns-challenge-2020/tree/4f1c078691327a75b3a338fe372ba356b450a6da
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/77/c77l5czq6smedgisqkyxix4cnvkwxbplk6btqx6b6aql6bshbsm2.py # Topologically Sorted Source Nodes: [mean, std, sub, add, truediv, mul, add_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # add_1 => add_1 # mean => mean # mul => mul # std => sqrt, var # sub => sub # truediv => div # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [-1]), kwargs = {correction: 1.0, keepdim: True}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-05), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_poi_fused_add_div_mean_mul_std_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': 
set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = 1e-05 tmp26 = tmp24 + tmp25 tmp27 = tmp10 / tmp26 tmp29 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + (x2), tmp31, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, std, sub, add, truediv, mul, add_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mean_mul_std_sub_0.run(primals_1, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_2 del primals_3 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from typing import Callable from typing import Tuple import torch.utils.data from typing import Union import torch.nn import torch.cuda import torch.backends.cudnn def batch_elementwise(input: 'torch.Tensor', param: 'torch.Tensor', op: 'Callable[[torch.Tensor, torch.Tensor], torch.Tensor]', input_batch_dim: 'int'=0, pndim: 'int'=1) ->torch.Tensor: """ Do elementwise operation in groups. :param input: input, any shape, [..., Ci, Cj, ...] :param param: the parameter, shape [N, Ci, Cj....], in which case B % N == 0, or [Ci, Cj....] :param input_batch_dim: which dimension is the batch in the input :param op: the operation to perform :param pndim: number of parameter dimensions without batch :return: input with the op performed, the same shape as input """ if param.ndim == pndim + 1: param = param.squeeze(0) if param.ndim == pndim: return op(input, param) assert param.ndim == pndim + 1 assert input.shape[input_batch_dim] % param.shape[0] == 0 input_r = input.view(*input.shape[:input_batch_dim], param.shape[0], -1, *input.shape[input_batch_dim + 1:]) param_r = param.view(*([1] * input_batch_dim), param.shape[0], *([1] * (input_r.ndim - input_batch_dim - param.ndim)), *param.shape[1:]) return op(input_r, param_r).view_as(input) def batch_bias_add(*args, **kwargs) ->torch.Tensor: """ Batch add bias to the inputs. For more details, see batch_elementwise """ return batch_elementwise(*args, op=lambda a, b: a + b, **kwargs) def batch_const_mul(*args, **kwargs) ->torch.Tensor: """ Batch multiplies bias to the inputs. For more details, see batch_elementwise """ return batch_elementwise(*args, op=lambda a, b: a * b, **kwargs) class MaskedModule(torch.nn.Module): pass class LayerNorm(MaskedModule): def __init__(self, normalized_shape: 'Union[int, Tuple[int]]', eps=1e-05): super().__init__() if isinstance(normalized_shape, int): normalized_shape = normalized_shape, self.gamma = torch.nn.Parameter(torch.ones(*normalized_shape)) self.beta = torch.nn.Parameter(torch.zeros(*normalized_shape)) self.eps = eps def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) return batch_bias_add(batch_const_mul((x - mean) / (std + self.eps), self.gamma), self.beta) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'normalized_shape': 4}]
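# --- Hedged usage sketch (editorial addition): the eager LayerNorm above
# differs from torch.nn.LayerNorm in two ways that the fused kernel also
# reflects: it uses the unbiased std (hence the 3.0 divisor for 4 elements)
# and adds eps to the std rather than to the variance.
import torch

ln = LayerNorm(normalized_shape=4)      # matches get_init_inputs()
x = torch.rand(4, 4, 4, 4)
y = ln(x)                               # normalize over the last dim, then gamma/beta
print(y.shape)                          # torch.Size([4, 4, 4, 4])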
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from typing import Callable from typing import Tuple import torch.utils.data from typing import Union import torch.nn import torch.cuda import torch.backends.cudnn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = 1e-05 tmp26 = tmp24 + tmp25 tmp27 = tmp10 / tmp26 tmp29 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x2, tmp31, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 def batch_elementwise(input: 'torch.Tensor', param: 'torch.Tensor', op: 'Callable[[torch.Tensor, torch.Tensor], torch.Tensor]', input_batch_dim: 'int'=0, pndim: 'int'=1) ->torch.Tensor: """ Do elementwise operation in groups. :param input: input, any shape, [..., Ci, Cj, ...] :param param: the parameter, shape [N, Ci, Cj....], in which case B % N == 0, or [Ci, Cj....] :param input_batch_dim: which dimension is the batch in the input :param op: the operation to perform :param pndim: number of parameter dimensions without batch :return: input with the op performed, the same shape as input """ if param.ndim == pndim + 1: param = param.squeeze(0) if param.ndim == pndim: return op(input, param) assert param.ndim == pndim + 1 assert input.shape[input_batch_dim] % param.shape[0] == 0 input_r = input.view(*input.shape[:input_batch_dim], param.shape[0], -1, *input.shape[input_batch_dim + 1:]) param_r = param.view(*([1] * input_batch_dim), param.shape[0], *([1] * (input_r.ndim - input_batch_dim - param.ndim)), *param.shape[1:]) return op(input_r, param_r).view_as(input) def batch_bias_add(*args, **kwargs) ->torch.Tensor: """ Batch add bias to the inputs. 
For more details, see batch_elementwise """ return batch_elementwise(*args, op=lambda a, b: a + b, **kwargs) def batch_const_mul(*args, **kwargs) ->torch.Tensor: """ Batch multiplies bias to the inputs. For more details, see batch_elementwise """ return batch_elementwise(*args, op=lambda a, b: a * b, **kwargs) class MaskedModule(torch.nn.Module): pass class LayerNormNew(MaskedModule): def __init__(self, normalized_shape: 'Union[int, Tuple[int]]', eps=1e-05): super().__init__() if isinstance(normalized_shape, int): normalized_shape = normalized_shape, self.gamma = torch.nn.Parameter(torch.ones(*normalized_shape)) self.beta = torch.nn.Parameter(torch.zeros(*normalized_shape)) self.eps = eps def forward(self, input_0): primals_2 = self.gamma primals_3 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
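# --- Hedged parity sketch (editorial addition): LayerNormNew and the eager
# LayerNorm expose the same parameter names (gamma, beta), so a state_dict
# copy is enough to compare them; a CUDA device is assumed because the
# compiled call() allocates on cuda:0.
import torch

if torch.cuda.is_available():
    eager = LayerNorm(normalized_shape=4).cuda()
    fused = LayerNormNew(normalized_shape=4).cuda()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(fused(x), eager(x))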
RobertCsordas/modules
LayerNorm
false
8706
[ "BSD-3-Clause" ]
22
efdb8790b074862581e035c9ab5bf889440a8023
https://github.com/RobertCsordas/modules/tree/efdb8790b074862581e035c9ab5bf889440a8023
GeneralAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/wz/cwzlgmghy6nxuchbiog4puo46i4tq7yhd3qu6ftkgjf3gwib6hxn.py # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 
= xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/yh/cyhf6bhaqimi2pucos5fnrpvhrt4vuaetbxnooyr5pvgjt7s6fgo.py # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 
1)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0), out=buf1) buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(buf3, primals_4, out=buf4) return (buf4, buf3, buf3, reinterpret_tensor(primals_4, (4, 4, 4), (16, 1, 4), 0), primals_3, reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class BaseAttention(nn.Module): def __init__(self): super().__init__() def forward(self, *args, **kwargs): raise NotImplementedError class GeneralAttention(BaseAttention): """General Attention""" def __init__(self, q_dim, k_dim, dropout_rate=0.0): super().__init__() self.weights = nn.Parameter(torch.empty(q_dim, k_dim)) self.dropout = nn.Dropout(dropout_rate) self._init_weights() def _init_weights(self): torch.nn.init.xavier_uniform_(self.weights) def forward(self, q, k, v, attn_mask=None): """Forward Args: q (torch.Tensor): Query matrix, (B, T_q, D_q) k (torch.Tensor): Key matrix, (B, T_k, D_k) v (torch.Tensor): Value matrix, (B, T_v, D_v) T_v = T_k, D_v = D_k attn_mask (torch.BoolTensor | None): Mask tensor. True element will be masked. Returns: output (B, T_q, D_v); attention (B, T_q, T_k) """ attention = q.matmul(self.weights).bmm(k.permute(0, 2, 1)) if attn_mask is not None: attention.masked_fill_(attn_mask, -np.inf) attention = F.softmax(attention, dim=-1) attention = self.dropout(attention) output = attention.bmm(v) return output, attention def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'q_dim': 4, 'k_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0), out=buf1) buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = buf2 del buf2 extern_kernels.bmm(buf3, primals_4, out=buf4) return buf4, buf3, buf3, reinterpret_tensor(primals_4, (4, 4, 4), (16, 1, 4), 0), primals_3, reinterpret_tensor(primals_1, (4, 16), (1, 4), 0) class BaseAttention(nn.Module): def __init__(self): super().__init__() def forward(self, *args, **kwargs): raise NotImplementedError class GeneralAttentionNew(BaseAttention): """General Attention""" def __init__(self, q_dim, k_dim, dropout_rate=0.0): super().__init__() self.weights = 
nn.Parameter(torch.empty(q_dim, k_dim)) self.dropout = nn.Dropout(dropout_rate) self._init_weights() def _init_weights(self): torch.nn.init.xavier_uniform_(self.weights) def forward(self, input_0, input_1, input_2): primals_2 = self.weights primals_1 = input_0 primals_3 = input_1 primals_4 = input_2 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0], output[1]
ROBINADC/BiGRU-CRF-with-Attention-for-NER
GeneralAttention
false
8,707
[ "MIT" ]
27
b9e037ebd6e1d56500ffb60c6030013982c17ded
https://github.com/ROBINADC/BiGRU-CRF-with-Attention-for-NER/tree/b9e037ebd6e1d56500ffb60c6030013982c17ded
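Two details of the compiled attention path are worth spelling out: the (B, T_q, D_q) x (D_q, D_k) projection is flattened into a single 16x4 mm over the collapsed batch, and the softmax is split into the usual numerically stable pair of kernels (row-max subtraction plus exp, then division by the row sum). The compiled forward also drops the attn_mask argument, so it only matches the eager module in the unmasked case. A sketch of that equivalence, assuming a CUDA device and the classes from this entry:

import torch

torch.manual_seed(0)
q, k, v = (torch.rand(4, 4, 4, device='cuda') for _ in range(3))

eager = GeneralAttention(q_dim=4, k_dim=4).cuda()
compiled = GeneralAttentionNew(q_dim=4, k_dim=4).cuda()
with torch.no_grad():
    compiled.weights.copy_(eager.weights)  # share the learned projection

out_ref, attn_ref = eager(q, k, v)       # dropout_rate defaults to 0.0, so dropout is identity
out_new, attn_new = compiled(q, k, v)
assert torch.allclose(out_ref, out_new, atol=1e-05)
assert torch.allclose(attn_ref, attn_new, atol=1e-05)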
SoftDiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/so/csoxzltek5j7gfukslhmbppf53tvyjqk7hs7p7hq4p7vg6t7wyrv.py # Topologically Sorted Source Nodes: [tp, sub, fp, sub_1, fn], Original ATen: [aten.mul, aten.rsub] # Source node to ATen node mapping: # fn => mul_2 # fp => mul_1 # sub => sub # sub_1 => sub_1 # tp => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %sub), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg1_1), kwargs = {}) triton_poi_fused_mul_rsub_0 = async_compile.triton('triton_poi_fused_mul_rsub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_rsub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp1 tmp5 = tmp0 * tmp4 tmp6 = tmp3 - tmp0 tmp7 = tmp6 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr1 + (x0), tmp5, xmask) tl.store(out_ptr2 + (x0), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tp, sub, fp, sub_1, fn], Original ATen: [aten.mul, aten.rsub] stream0 = get_raw_stream(0) triton_poi_fused_mul_rsub_0.run(arg0_1, arg1_1, buf0, buf1, buf2, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, buf1, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np from torch import nn import torch.nn.functional def sum_tensor(inp, axes, keepdim=False): axes = np.unique(axes).astype(int) if keepdim: for ax in axes: inp = inp.sum(int(ax), keepdim=True) else: for ax in sorted(axes, reverse=True): inp = inp.sum(int(ax)) return inp def get_tp_fp_fn(net_output, gt, axes=None, mask=None, square=False): """ net_output must be (b, c, x, y(, z))) gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z)) if mask is provided it must have shape (b, 1, x, y(, z))) :param net_output: :param gt: :param axes: :param mask: mask must be 1 for valid pixels and 0 for invalid pixels :param square: if True then fp, tp and fn will be squared before summation :return: """ if axes is None: axes = tuple(range(2, len(net_output.size()))) shp_x = net_output.shape shp_y = gt.shape with torch.no_grad(): if len(shp_x) != len(shp_y): gt = gt.view((shp_y[0], 1, *shp_y[1:])) if all([(i == j) for i, j in zip(net_output.shape, gt.shape)]): y_onehot = gt else: gt = gt.long() y_onehot = torch.zeros(shp_x) if net_output.device.type == 'cuda': y_onehot = y_onehot y_onehot.scatter_(1, gt, 1) tp = net_output * y_onehot fp = net_output * (1 - y_onehot) fn = (1 - net_output) * y_onehot if mask is not None: tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1) fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1) fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1) if square: tp = tp ** 2 fp = fp ** 2 fn = fn ** 2 tp = sum_tensor(tp, axes, keepdim=False) fp = sum_tensor(fp, axes, keepdim=False) fn = sum_tensor(fn, axes, keepdim=False) return tp, fp, fn class SoftDiceLoss(nn.Module): def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.0, square=False): """ """ super(SoftDiceLoss, self).__init__() self.square = square self.do_bg = do_bg self.batch_dice = batch_dice self.apply_nonlin = apply_nonlin self.smooth = smooth def forward(self, x, y, loss_mask=None): shp_x = x.shape if self.batch_dice: axes = [0] + list(range(2, len(shp_x))) else: axes = list(range(2, len(shp_x))) if self.apply_nonlin is not None: x = self.apply_nonlin(x) tp, fp, fn = get_tp_fp_fn(x, y, axes, loss_mask, self.square) dc = (2 * tp + self.smooth) / (2 * tp + fp + fn + self.smooth) if not self.do_bg: if self.batch_dice: dc = dc[1:] else: dc = dc[:, 1:] dc = dc.mean() return -dc def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np from torch import nn import torch.nn.functional assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp1 tmp5 = tmp0 * tmp4 tmp6 = tmp3 - tmp0 tmp7 = tmp6 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr1 + x0, tmp5, xmask) tl.store(out_ptr2 + x0, tmp7, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_rsub_0[grid(256)](arg0_1, arg1_1, buf0, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, buf1, buf2 def sum_tensor(inp, axes, keepdim=False): axes = np.unique(axes).astype(int) if keepdim: for ax in axes: inp = inp.sum(int(ax), keepdim=True) else: for ax in sorted(axes, reverse=True): inp = inp.sum(int(ax)) return inp def get_tp_fp_fn(net_output, gt, axes=None, mask=None, square=False): """ net_output must be (b, c, x, y(, z))) gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z)) if mask is provided it must have shape (b, 1, x, y(, z))) :param net_output: :param gt: :param axes: :param mask: mask must be 1 for valid pixels and 0 for invalid pixels :param square: if True then fp, tp and fn will be squared before summation :return: """ if axes is None: axes = tuple(range(2, len(net_output.size()))) shp_x = net_output.shape shp_y = gt.shape with torch.no_grad(): if len(shp_x) != len(shp_y): gt = gt.view((shp_y[0], 1, *shp_y[1:])) if all([(i == j) for i, j in zip(net_output.shape, gt.shape)]): y_onehot = gt else: gt = gt.long() y_onehot = torch.zeros(shp_x) if net_output.device.type == 'cuda': y_onehot = y_onehot y_onehot.scatter_(1, gt, 1) tp = net_output * y_onehot fp = net_output * (1 - y_onehot) fn = (1 - net_output) * y_onehot if mask is not None: tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1) fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1) fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1) if square: tp = tp ** 2 fp = fp ** 2 fn = fn ** 2 tp = sum_tensor(tp, axes, keepdim=False) fp = sum_tensor(fp, axes, keepdim=False) fn = sum_tensor(fn, axes, keepdim=False) return tp, fp, fn class SoftDiceLossNew(nn.Module): def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.0, square=False): """ """ super(SoftDiceLossNew, self).__init__() self.square = square self.do_bg = do_bg self.batch_dice = batch_dice self.apply_nonlin = apply_nonlin self.smooth = smooth def forward(self, input_0, input_1): arg0_1 = 
input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Ramsha04/kits19_cnn
SoftDiceLoss
false
8,708
[ "Apache-2.0" ]
15
0c1c861ca1a211a840a77e52895548e8d8033470
https://github.com/Ramsha04/kits19_cnn/tree/0c1c861ca1a211a840a77e52895548e8d8033470
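Two caveats on this entry. First, only the elementwise tp/fp/fn maps were captured by the fused kernel; SoftDiceLossNew.forward returns output[0], i.e. the raw true-positive map, so the reductions and the dice ratio from the eager module still have to be applied on top of call's three outputs. Second, the eager get_tp_fp_fn carries an upstream no-op, `y_onehot = y_onehot`, where a device transfer (presumably something like `y_onehot = y_onehot.to(net_output.device)`) was intended; the compiled path never hits it because same-shape float inputs skip the one-hot branch. A hedged sketch of recovering the full loss from the fused maps, assuming a CUDA device and the helpers defined in this entry:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')  # net_output
y = torch.rand(4, 4, 4, 4, device='cuda')  # ground truth, already same shape
tp_map, fp_map, fn_map = call([x, y])      # the three fused elementwise maps

axes = list(range(2, x.ndim))              # batch_dice=False, square=False defaults
tp = sum_tensor(tp_map, axes)
fp = sum_tensor(fp_map, axes)
fn = sum_tensor(fn_map, axes)
smooth = 1.0
dc = (2 * tp + smooth) / (2 * tp + fp + fn + smooth)
loss = -dc.mean()                          # matches SoftDiceLoss(do_bg=True) on these inputs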
DeConvNet64
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/vk/cvkfkcxx2eq6evt22oohystebz2yozj4mjiqnsskws4gbjhocgvw.py # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_1 => convolution # input_2 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 495616 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 121) % 1024 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ol/colybmpnaokx67ittdzvagjpdwvsnv2xyytilrk4tza6wuqaj4mi.py # Topologically Sorted Source Nodes: [input_3, input_4], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_3 => convolution_1 # input_4 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 991232 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 484) % 512 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ih/cih4pste5qxfm3mjsemycqwwbafvlkkalo5mq4wlkyr4ucvqegu7.py # Topologically Sorted Source Nodes: [input_5, input_6], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_5 => convolution_2 # input_6 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [2, 2], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {}) # 
%relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1982464 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1936) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ph/cph3b64ryhg5hvukx5u6fmmqptyw5iuisa7uadoo4w6u2saxwgs7.py # Topologically Sorted Source Nodes: [input_7, input_8], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_7 => convolution_3 # input_8 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [2, 2], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 
'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3964928 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 7744) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/fp/cfprvosswwsmsfodeidueehvqzc4q3bj7yyxhuzjwrua6n3gypnc.py # Topologically Sorted Source Nodes: [input_9], Original ATen: [aten.convolution] # Source node to ATen node mapping: # input_9 => convolution_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 92928 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 7744) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = 
tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (64, 1024, 8, 8), (65536, 64, 8, 1)) assert_size_stride(primals_2, (1024, ), (1, )) assert_size_stride(primals_3, (4, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_4, (1024, 512, 4, 4), (8192, 16, 4, 1)) assert_size_stride(primals_5, (512, ), (1, )) assert_size_stride(primals_6, (512, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_7, (256, ), (1, )) assert_size_stride(primals_8, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_9, (128, ), (1, )) assert_size_stride(primals_10, (128, 3, 1, 1), (3, 1, 1, 1)) assert_size_stride(primals_11, (3, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1024, 11, 11), (123904, 121, 11, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 495616, grid=grid(495616), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 512, 22, 22), (247808, 484, 22, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [input_3, input_4], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 991232, grid=grid(991232), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 256, 44, 44), (495616, 1936, 44, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [input_5, input_6], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf5, primals_7, 1982464, grid=grid(1982464), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [input_7], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 128, 88, 88), (991232, 7744, 88, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [input_7, input_8], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_3.run(buf7, primals_9, 3964928, grid=grid(3964928), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [input_9], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 
3, 88, 88), (23232, 7744, 88, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [input_9], Original ATen: [aten.convolution] triton_poi_fused_convolution_4.run(buf9, primals_11, 92928, grid=grid(92928), stream=stream0) del primals_11 return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 1024, 8, 8), (65536, 64, 8, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1024, 512, 4, 4), (8192, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((512, 256, 4, 4), (4096, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((256, 128, 4, 4), (2048, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((128, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn def get_activation(s_act): if s_act == 'relu': return nn.ReLU(inplace=True) elif s_act == 'sigmoid': return nn.Sigmoid() elif s_act == 'softplus': return nn.Softplus() elif s_act == 'linear': return None elif s_act == 'tanh': return nn.Tanh() elif s_act == 'leakyrelu': return nn.LeakyReLU(0.2, inplace=True) elif s_act == 'softmax': return nn.Softmax(dim=1) elif s_act == 'spherical': return SphericalActivation() else: raise ValueError(f'Unexpected activation: {s_act}') class SphericalActivation(nn.Module): def __init__(self): super().__init__() def forward(self, x): return x / x.norm(p=2, dim=1, keepdim=True) class DeConvNet64(nn.Module): """ConvNet architecture for CelebA64 following Ghosh et al., 2019""" def __init__(self, in_chan=64, out_chan=3, nh=32, out_activation= 'linear', activation='relu', num_groups=None, use_bn=False): super().__init__() self.fc1 = nn.ConvTranspose2d(in_chan, nh * 32, kernel_size=8, bias =True) self.conv1 = nn.ConvTranspose2d(nh * 32, nh * 16, kernel_size=4, stride=2, padding=1, bias=True) self.conv2 = nn.ConvTranspose2d(nh * 16, nh * 8, kernel_size=4, stride=2, padding=1, bias=True) self.conv3 = nn.ConvTranspose2d(nh * 8, nh * 4, kernel_size=4, stride=2, padding=1, bias=True) self.conv4 = nn.ConvTranspose2d(nh * 4, out_chan, kernel_size=1, bias=True) self.in_chan, self.out_chan = in_chan, out_chan self.num_groups = num_groups self.use_bn = use_bn layers = [] layers.append(self.fc1) if num_groups is not None: layers.append(self.get_norm_layer(num_channels=nh * 32)) layers.append(get_activation(activation)) layers.append(self.conv1) if num_groups is not None: layers.append(self.get_norm_layer(num_channels=nh * 16)) layers.append(get_activation(activation)) layers.append(self.conv2) if num_groups is not None: layers.append(self.get_norm_layer(num_channels=nh * 8)) layers.append(get_activation(activation)) layers.append(self.conv3) if num_groups is not None: layers.append(self.get_norm_layer(num_channels=nh * 4)) layers.append(get_activation(activation)) layers.append(self.conv4) out_activation = get_activation(out_activation) if out_activation is not None: layers.append(out_activation) self.net = nn.Sequential(*layers) def forward(self, x): return self.net(x) def get_norm_layer(self, num_channels): if self.num_groups is not None: return nn.GroupNorm(num_groups=self.num_groups, num_channels= num_channels) elif self.use_bn: return nn.BatchNorm2d(num_channels) def get_inputs(): return [torch.rand([4, 64, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 121 % 1024 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 484 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1936 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 7744 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 92928 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 7744 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (64, 1024, 8, 8), (65536, 64, 8, 1)) assert_size_stride(primals_2, (1024,), (1,)) assert_size_stride(primals_3, (4, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_4, (1024, 512, 4, 4), (8192, 16, 4, 1)) assert_size_stride(primals_5, (512,), (1,)) assert_size_stride(primals_6, (512, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (128, 3, 1, 1), (3, 1, 1, 1)) assert_size_stride(primals_11, (3,), (1,)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1024, 11, 11), (123904, 121, 11, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(495616)](buf1, primals_2, 495616, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 512, 22, 22), (247808, 484, 22, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(991232)](buf3, primals_5, 991232, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 256, 44, 44), (495616, 1936, 44, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(1982464)](buf5, primals_7, 1982464, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 128, 88, 88), (991232, 7744, 88, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_3[grid(3964928)](buf7, primals_9, 3964928, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 3, 88, 88), (23232, 7744, 88, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_4[grid(92928)](buf9, primals_11, 92928, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7) def get_activation(s_act): if s_act == 'relu': return nn.ReLU(inplace=True) elif s_act == 'sigmoid': return nn.Sigmoid() elif s_act == 'softplus': return nn.Softplus() elif s_act == 'linear': return None elif s_act == 'tanh': return nn.Tanh() elif s_act == 'leakyrelu': return nn.LeakyReLU(0.2, inplace=True) elif s_act == 'softmax': return nn.Softmax(dim=1) elif s_act == 'spherical': return SphericalActivation() else: raise ValueError(f'Unexpected activation: {s_act}') class SphericalActivation(nn.Module): def __init__(self): super().__init__() def forward(self, x): return x / x.norm(p=2, dim=1, keepdim=True) class DeConvNet64New(nn.Module): """ConvNet architecture for CelebA64 following Ghosh et al., 2019""" def __init__(self, in_chan=64, out_chan=3, nh=32, out_activation= 'linear', activation='relu', num_groups=None, use_bn=False): super().__init__() self.fc1 = nn.ConvTranspose2d(in_chan, nh * 32, kernel_size=8, bias =True) self.conv1 = nn.ConvTranspose2d(nh * 32, nh * 16, kernel_size=4, stride=2, padding=1, bias=True) self.conv2 = nn.ConvTranspose2d(nh * 16, nh * 8, kernel_size=4, stride=2, padding=1, bias=True) self.conv3 = nn.ConvTranspose2d(nh * 8, nh * 4, kernel_size=4, stride=2, padding=1, bias=True) self.conv4 = nn.ConvTranspose2d(nh * 4, out_chan, kernel_size=1, bias=True) self.in_chan, self.out_chan = in_chan, out_chan self.num_groups = num_groups self.use_bn = use_bn layers = [] layers.append(self.fc1) if num_groups is not None: 
layers.append(self.get_norm_layer(num_channels=nh * 32)) layers.append(get_activation(activation)) layers.append(self.conv1) if num_groups is not None: layers.append(self.get_norm_layer(num_channels=nh * 16)) layers.append(get_activation(activation)) layers.append(self.conv2) if num_groups is not None: layers.append(self.get_norm_layer(num_channels=nh * 8)) layers.append(get_activation(activation)) layers.append(self.conv3) if num_groups is not None: layers.append(self.get_norm_layer(num_channels=nh * 4)) layers.append(get_activation(activation)) layers.append(self.conv4) out_activation = get_activation(out_activation) if out_activation is not None: layers.append(out_activation) self.net = nn.Sequential(*layers) def get_norm_layer(self, num_channels): if self.num_groups is not None: return nn.GroupNorm(num_groups=self.num_groups, num_channels= num_channels) elif self.use_bn: return nn.BatchNorm2d(num_channels) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.conv1.weight primals_5 = self.conv1.bias primals_6 = self.conv2.weight primals_7 = self.conv2.bias primals_8 = self.conv3.weight primals_9 = self.conv3.bias primals_10 = self.conv4.weight primals_11 = self.conv4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
Neural-Diffusion-Research/normalized-autoencoders
DeConvNet64
false
8709
[ "MIT" ]
30
0c77f7e29289e336c0fe5e941aaec8baa4a4fb82
https://github.com/Neural-Diffusion-Research/normalized-autoencoders/tree/0c77f7e29289e336c0fe5e941aaec8baa4a4fb82
GCNLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/da/cdaepmrnxvddfvroo2pkiykx43azi23c5qlfx64phztulyoh6yrd.py # Topologically Sorted Source Nodes: [node_embeds], Original ATen: [aten.add] # Source node to ATen node mapping: # node_embeds => add # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_1), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_out_ptr0 + (x0), xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_1/inductor_cache/lm/clmx4fcfgme6setczva5utrsvbqxu3hkbkkrbbyeulf3l5v46afh.py # Topologically Sorted Source Nodes: [node_embeds_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # node_embeds_1 => add_1, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/rd/crdgewumqyypvcq2dj5zpdvrkeux5rop5hk5nr5kwljn6ts77f2o.py # Topologically Sorted Source Nodes: [node_embeds_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # node_embeds_1 => add_1, add_2, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] 
= call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_4), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_5), kwargs = {}) triton_poi_fused_native_layer_norm_2 = async_compile.triton('triton_poi_fused_native_layer_norm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = 
empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm] extern_kernels.bmm(primals_2, primals_1, out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [ctx_embeds], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [node_embeds], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(buf2, primals_1, 64, grid=grid(64), stream=stream0) del primals_1 buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [node_embeds_1], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(buf2, buf3, buf4, 16, grid=grid(16), stream=stream0) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [node_embeds_1], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_2.run(buf2, buf3, buf4, primals_4, primals_5, buf5, 64, grid=grid(64), stream=stream0) del buf3 del buf4 del primals_5 return (buf5, primals_4, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class GCNLayer(nn.Module): def __init__(self, embed_size, dropout=0.0): super().__init__() self.embed_size = embed_size self.ctx_layer = nn.Linear(self.embed_size, self.embed_size, bias=False ) self.layernorm = nn.LayerNorm(embed_size) self.dropout = nn.Dropout(dropout) def forward(self, node_fts, rel_edges): """Args: node_fts: (batch_size, num_nodes, embed_size) rel_edges: (batch_size, num_nodes, num_nodes) """ ctx_embeds = self.ctx_layer(torch.bmm(rel_edges, node_fts)) node_embeds = node_fts + self.dropout(ctx_embeds) node_embeds = self.layernorm(node_embeds) return node_embeds def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'embed_size': 4}]
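The docstring above already fixes the shapes; a tiny illustrative run of the eager module (CPU is fine here, and this sketch is not part of the dataset entry) makes the contract concrete:

import torch

# GCNLayer as defined in this entry; embed_size=4 mirrors get_init_inputs().
layer = GCNLayer(embed_size=4)
node_fts = torch.rand(4, 4, 4)   # (batch_size, num_nodes, embed_size)
rel_edges = torch.rand(4, 4, 4)  # (batch_size, num_nodes, num_nodes)
out = layer(node_fts, rel_edges)
assert out.shape == (4, 4, 4)    # LayerNorm(x + dropout(Linear(A @ x))) keeps the shape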
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_out_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(primals_2, primals_1, out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 get_raw_stream(0) triton_poi_fused_add_0[grid(64)](buf2, primals_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf3 = 
empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_native_layer_norm_1[grid(16)](buf2, buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_2[grid(64)](buf2, buf3, buf4, primals_4, primals_5, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf3 del buf4 del primals_5 return buf5, primals_4, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2 class GCNLayerNew(nn.Module): def __init__(self, embed_size, dropout=0.0): super().__init__() self.embed_size = embed_size self.ctx_layer = nn.Linear(self.embed_size, self.embed_size, bias=False ) self.layernorm = nn.LayerNorm(embed_size) self.dropout = nn.Dropout(dropout) def forward(self, input_0, input_1): primals_3 = self.ctx_layer.weight primals_4 = self.layernorm.weight primals_5 = self.layernorm.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
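Because dropout defaults to 0.0 and is traced away, GCNLayerNew should reproduce GCNLayer numerically once parameters are shared. A minimal equivalence sketch, assuming a CUDA device (call() pins cuda:0) and the traced (4, 4, 4) shapes:

import torch

ref = GCNLayer(embed_size=4).cuda().eval()
new = GCNLayerNew(embed_size=4).cuda().eval()
new.load_state_dict(ref.state_dict())  # both classes use the same parameter names

node_fts = torch.rand(4, 4, 4, device='cuda')
rel_edges = torch.rand(4, 4, 4, device='cuda')
# small float differences between the fused and eager layer norms are expected,
# so compare with tolerances rather than exact equality
torch.testing.assert_close(new(node_fts, rel_edges), ref(node_fts, rel_edges))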
Roc-Ng/HANet
GCNLayer
false
8710
[ "MIT" ]
34
e679703e9e725205424d87f750358fb4f62ceec5
https://github.com/Roc-Ng/HANet/tree/e679703e9e725205424d87f750358fb4f62ceec5
BahdanauAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/q5/cq5pvtkjkp3aleoa5ymo5bpqc5uppsijk36tuyhb3z7gkyxh5qrb.py # Topologically Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # tanh => tanh # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze, %view_3), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 256) x4 = xindex 
% 64 x0 = xindex % 4 x5 = xindex % 256 x6 = xindex tmp0 = tl.load(in_ptr0 + (x4 + (64*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = libdevice.tanh(tmp6) tl.store(out_ptr0 + (x6), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/sw/csw67i3hhbdxlk5trkuwzwx7ydpxo3jcp6zf6d7a3dzvc6tonkwt.py # Topologically Sorted Source Nodes: [max_1, sub, s_1], Original ATen: [aten.max, aten.sub, aten.exp] # Source node to ATen node mapping: # max_1 => max_1 # s_1 => exp # sub => sub # Graph fragment: # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view_5, 1, True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %getitem), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused_exp_max_sub_1 = async_compile.triton('triton_poi_fused_exp_max_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_max_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_exp_max_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_1/inductor_cache/3i/c3ilbwt4vzv6rs3ejv3hzueol2csqusuqoahabu26e7wimy32cti.py # Topologically Sorted Source Nodes: [sum_1, add_1, a], Original ATen: [aten.sum, aten.add, aten.div] # Source node to ATen node mapping: # a => div # add_1 => add_1 # sum_1 => sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-08), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %add_1), kwargs = {}) triton_poi_fused_add_div_sum_2 = async_compile.triton('triton_poi_fused_add_div_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 1e-08 tmp9 = tmp7 + tmp8 tmp10 = tmp0 / tmp9 tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/lf/clfy7rmwwu32dzcgq4k4vmf6qx3omuaw53puabpuc42uzhzp2gz2.py # Topologically Sorted Source Nodes: [mul, c], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # c => sum_2 # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_6, %div), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 64 x1 = (xindex // 4) % 16 x2 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (16 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (32 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (48 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + (x4), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (1, 4), (4, 1)) assert_size_stride(primals_8, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, 
(4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_add_tanh_0.run(buf0, primals_2, buf1, primals_5, buf2, 1024, grid=grid(1024), stream=stream0) del primals_2 del primals_5 buf4 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [s], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_8 buf5 = reinterpret_tensor(buf0, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [max_1, sub, s_1], Original ATen: [aten.max, aten.sub, aten.exp] triton_poi_fused_exp_max_sub_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [sum_1, add_1, a], Original ATen: [aten.sum, aten.add, aten.div] triton_poi_fused_add_div_sum_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [mul, c], Original ATen: [aten.mul, aten.sum] triton_poi_fused_mul_sum_3.run(primals_6, buf6, buf7, 256, grid=grid(256), stream=stream0) return (buf7, buf6, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf4, primals_7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class BahdanauAttention(nn.Module): def __init__(self, hidden_dim): super(BahdanauAttention, self).__init__() self.W = nn.Linear(hidden_dim, hidden_dim) self.U = nn.Linear(hidden_dim, hidden_dim) self.v = nn.Linear(hidden_dim, 1) def forward(self, dec_h_prev, enc_h_all, epsilon=1e-08): w = self.W(dec_h_prev).unsqueeze(1) u = self.U(enc_h_all) s = self.v(torch.tanh(w + u)) m, _ = s.max(dim=1, keepdim=True) s = torch.exp(s - m) a = s / (torch.sum(s, dim=1, keepdim=True) + epsilon) c = torch.sum(enc_h_all * a, dim=1) return c, a def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_dim': 4}]
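The additive-attention math is easier to read with the usual 2-D decoder state and 3-D encoder memory than with the 4-D tensors used for tracing; the shapes below are illustrative choices, not taken from this entry:

import torch

attn = BahdanauAttention(hidden_dim=4)
dec_h_prev = torch.rand(2, 4)    # (batch, hidden): previous decoder state
enc_h_all = torch.rand(2, 5, 4)  # (batch, src_len, hidden): encoder outputs
c, a = attn(dec_h_prev, enc_h_all)
assert c.shape == (2, 4) and a.shape == (2, 5, 1)
# the weights are a max-stabilised softmax over src_len, so they sum to ~1
# (exactly 1 minus the epsilon correction in the denominator)
assert torch.allclose(a.sum(dim=1), torch.ones(2, 1), atol=1e-5)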
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 256 x4 = xindex % 64 x0 = xindex % 4 x5 = xindex % 256 x6 = xindex tmp0 = tl.load(in_ptr0 + (x4 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = libdevice.tanh(tmp6) tl.store(out_ptr0 + x6, tmp7, xmask) @triton.jit def triton_poi_fused_exp_max_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused_add_div_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 1e-08 tmp9 = tmp7 + tmp8 tmp10 = tmp0 / tmp9 tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 64 x1 = xindex // 4 % 16 x2 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (16 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (32 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (48 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x4, tmp14, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (1, 4), (4, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_tanh_0[grid(1024)](buf0, primals_2, buf1, primals_5, buf2, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_5 buf4 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0) del buf1 extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_8 buf5 = reinterpret_tensor(buf0, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0 ) del buf0 triton_poi_fused_exp_max_sub_1[grid(256)](buf4, buf5, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) triton_poi_fused_add_div_sum_2[grid(256)](buf5, buf6, 256, XBLOCK= 
256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_mul_sum_3[grid(256)](primals_6, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf7, buf6, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf4, primals_7 class BahdanauAttentionNew(nn.Module): def __init__(self, hidden_dim): super(BahdanauAttentionNew, self).__init__() self.W = nn.Linear(hidden_dim, hidden_dim) self.U = nn.Linear(hidden_dim, hidden_dim) self.v = nn.Linear(hidden_dim, 1) def forward(self, input_0, input_1): primals_1 = self.W.weight primals_2 = self.W.bias primals_4 = self.U.weight primals_5 = self.U.bias primals_7 = self.v.weight primals_8 = self.v.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0], output[1]
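Unlike the eager module, the compiled wrapper is shape-specialised: assert_size_stride accepts only the traced (4, 4, 4, 4) inputs, and epsilon is frozen into triton_poi_fused_add_div_sum_2. A hypothetical smoke test on cuda:0:

import torch

attn = BahdanauAttentionNew(hidden_dim=4).cuda()
dec = torch.rand(4, 4, 4, 4, device='cuda')
enc = torch.rand(4, 4, 4, 4, device='cuda')
c, a = attn(dec, enc)
assert c.shape == (4, 4, 4, 4)     # buf7: the context tensor
assert a.shape == (4, 4, 4, 4, 1)  # buf6: weights keep the reduced dim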
RiTUAL-UH/style_NER
BahdanauAttention
false
8,711
[ "MIT" ]
17
4bb206cb48a45cc71deea3eea249eeb266c019a4
https://github.com/RiTUAL-UH/style_NER/tree/4bb206cb48a45cc71deea3eea249eeb266c019a4
ScaledDotProductAttention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/c5/cc5hodm36vyyy6mtrsd3kh6yp3wh2yz4b7xlcd53hs3fb3erko24.py # Topologically Sorted Source Nodes: [attention_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_2 => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 0.5), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/yh/cyhf6bhaqimi2pucos5fnrpvhrt4vuaetbxnooyr5pvgjt7s6fgo.py # Topologically Sorted Source Nodes: [attention_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_2 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, 
eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention], Original ATen: [aten.bmm] extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_2], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [attention_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 return (buf3, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class BaseAttention(nn.Module): def __init__(self): super().__init__() def forward(self, *args, **kwargs): raise NotImplementedError class ScaledDotProductAttention(BaseAttention): """Scaled dot-product attention calculation""" def __init__(self, dropout_rate=0.0, **kwargs): """Initialize ScaledDotProductAttention Args: dropout_rate (float): attention dropout_rate rate """ super().__init__() self.dropout = nn.Dropout(dropout_rate) def forward(self, q, k, v, attn_mask=None): """Forward Args: q (torch.Tensor): Query matrix, (B, T_q, D_q) k (torch.Tensor): Key matrix, (B, T_k, D_k) v (torch.Tensor): Value matrix, (B, T_v, D_v) T_v = T_k, D_v = D_k attn_mask (torch.BoolTensor | None): Mask tensor. True element will be masked. Returns: output (B, T_q, D_v); attention (B, T_q, T_k) """ attention = torch.bmm(q, k.permute(0, 2, 1)) attention *= k.size(-1) ** -0.5 if attn_mask is not None: attention.masked_fill_(attn_mask, -np.inf) attention = F.softmax(attention, dim=-1) attention = self.dropout(attention) output = attention.bmm(v) return output, attention def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {}]
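Note that the attn_mask branch exists only in this eager module; the compiled ScaledDotProductAttentionNew below traces the unmasked path. A masked example is therefore worth spelling out; dropout_rate=0.0 keeps it deterministic, and the shapes are illustrative:

import torch

attn = ScaledDotProductAttention(dropout_rate=0.0)
q = torch.rand(2, 3, 8)  # (B, T_q, D_q)
k = torch.rand(2, 5, 8)  # (B, T_k, D_k)
v = torch.rand(2, 5, 8)  # (B, T_v, D_v) with T_v == T_k
mask = torch.zeros(2, 3, 5, dtype=torch.bool)
mask[:, :, -1] = True    # True marks key positions to hide
out, att = attn(q, k, v, attn_mask=mask)
assert out.shape == (2, 3, 8) and att.shape == (2, 3, 5)
assert att[:, :, -1].eq(0).all()  # -inf before softmax becomes zero weight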
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 return buf3, buf2 class BaseAttention(nn.Module): def __init__(self): super().__init__() def forward(self, *args, **kwargs): raise NotImplementedError class ScaledDotProductAttentionNew(BaseAttention): """Scaled dot-product attention calculation""" def __init__(self, dropout_rate=0.0, **kwargs): """Initialize ScaledDotProductAttention Args: dropout_rate (float): attention dropout_rate rate """ super().__init__() self.dropout = nn.Dropout(dropout_rate) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 
output = call([arg1_1, arg0_1, arg2_1]) return output[0], output[1]
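Note that the compiled call() above is shape-specialized (assert_size_stride pins every input to a contiguous (4, 4, 4) tensor) and empties the argument list it is given (args.clear()). As the bmm shows, the graph consumes its first argument as the key matrix and its second as the query, which is why the wrapper passes (arg1_1, arg0_1, arg2_1). A hedged direct invocation, assuming a CUDA device:

q, k, v = (torch.rand(4, 4, 4, device='cuda') for _ in range(3))
out, attn_weights = call([k, q, v])   # returns (output, attention), both (4, 4, 4)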
ROBINADC/BiGRU-CRF-with-Attention-for-NER
ScaledDotProductAttention
false
8,712
[ "MIT" ]
27
b9e037ebd6e1d56500ffb60c6030013982c17ded
https://github.com/ROBINADC/BiGRU-CRF-with-Attention-for-NER/tree/b9e037ebd6e1d56500ffb60c6030013982c17ded
AttnGCNLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/fw/cfwliyx2v34i2f3hr4soz2qsuc4weunsj4igdxpmodazqhnzgcmz.py # Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq] # Source node to ATen node mapping: # eq => eq # Graph fragment: # %eq : [num_users=3] = call_function[target=torch.ops.aten.eq.Scalar](args = (%primals_6, 0), kwargs = {}) triton_poi_fused_eq_0 = async_compile.triton('triton_poi_fused_eq_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/3w/c3wt6fhoeuattftgghcbcfbxkeuqtyewpgfjjr5xlwxmfngikz24.py # Topologically Sorted Source 
Nodes: [attn_scores, attn_scores_1, attn_scores_2], Original ATen: [aten.div, aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # attn_scores => div # attn_scores_1 => full_default, where # attn_scores_2 => amax, exp, sub, sum_1 # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_7, 2.0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -9.999999843067494e+17), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [2], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) triton_poi_fused__softmax_div_masked_fill_1 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_div_masked_fill_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp17 = tl.load(in_ptr1 + 
(3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = -9.999999843067494e+17 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp8 = tmp7 * tmp2 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tmp12 * tmp2 tmp14 = tl.where(tmp11, tmp4, tmp13) tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tmp17 * tmp2 tmp19 = tl.where(tmp16, tmp4, tmp18) tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + (x0), tmp20, xmask) tl.store(out_ptr1 + (x0), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/n7/cn7clp2l2akmnrptn4wklz76hjxsuoecectykliowvokjl2uxg7e.py # Topologically Sorted Source Nodes: [attn_scores, attn_scores_1, attn_scores_2, attn_scores_3], Original ATen: [aten.div, aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # attn_scores => div # attn_scores_1 => full_default, where # attn_scores_2 => amax, div_1, exp, sub # attn_scores_3 => full_default_1, where_1 # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_7, 2.0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -9.999999843067494e+17), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [2], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default_1, %div_1), kwargs = {}) triton_poi_fused__softmax_div_masked_fill_2 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 
4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_div_masked_fill_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask).to(tl.int1) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp6 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = -9.999999843067494e+17 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp7 = tmp5 - tmp6 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tmp11 = 0.0 tmp12 = tl.where(tmp0, tmp11, tmp10) tl.store(in_out_ptr0 + (x2), tmp10, xmask) tl.store(out_ptr0 + (x2), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/zb/czb43dsdqo3o4ceeiy3tfgdmpzbzd6a5n46iclzndsyquoh5mwnu.py # Topologically Sorted Source Nodes: [node_embeds, node_embeds_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # node_embeds => add # node_embeds_1 => var_mean # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %view_9), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_3 = async_compile.triton('triton_poi_fused_add_native_layer_norm_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, 
XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ud/cudhbs256c52wrxh645mq3bmvoarfdcntgcvck2jcd6hpyibyo4j.py # Topologically Sorted Source Nodes: [node_embeds, node_embeds_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # node_embeds => add # node_embeds_1 => add_1, add_2, mul, mul_1, rsqrt, sub_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %view_9), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_8), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_9), kwargs = {}) triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 
'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq] stream0 = get_raw_stream(0) triton_poi_fused_eq_0.run(primals_6, buf3, 64, grid=grid(64), stream=stream0) del primals_6 buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [attn_scores, attn_scores_1, attn_scores_2], Original ATen: [aten.div, aten.masked_fill, aten._softmax] triton_poi_fused__softmax_div_masked_fill_1.run(buf3, buf2, buf4, buf5, 16, grid=grid(16), 
stream=stream0) buf6 = buf2; del buf2 # reuse buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_scores, attn_scores_1, attn_scores_2, attn_scores_3], Original ATen: [aten.div, aten.masked_fill, aten._softmax] triton_poi_fused__softmax_div_masked_fill_2.run(buf6, buf3, buf4, buf5, buf7, 64, grid=grid(64), stream=stream0) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_scores_3, bmm], Original ATen: [aten.masked_fill, aten.bmm] extern_kernels.bmm(buf7, primals_3, out=buf8) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [ctx_embeds], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf9) buf10 = buf5; del buf5 # reuse buf11 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [node_embeds, node_embeds_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_3.run(primals_3, buf9, buf10, buf11, 16, grid=grid(16), stream=stream0) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [node_embeds, node_embeds_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_4.run(primals_3, buf9, buf10, buf11, primals_8, primals_9, buf12, 64, grid=grid(64), stream=stream0) del buf10 del buf11 del primals_9 return (buf12, primals_3, primals_8, buf3, buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), buf9, primals_7, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn as nn import torch.utils.data class GCNLayer(nn.Module): def __init__(self, embed_size, dropout=0.0): super().__init__() self.embed_size = embed_size self.ctx_layer = nn.Linear(self.embed_size, self.embed_size, bias=False ) self.layernorm = nn.LayerNorm(embed_size) self.dropout = nn.Dropout(dropout) def forward(self, node_fts, rel_edges): """Args: node_fts: (batch_size, num_nodes, embed_size) rel_edges: (batch_size, num_nodes, num_nodes) """ ctx_embeds = self.ctx_layer(torch.bmm(rel_edges, node_fts)) node_embeds = node_fts + self.dropout(ctx_embeds) node_embeds = self.layernorm(node_embeds) return node_embeds class AttnGCNLayer(GCNLayer): def __init__(self, embed_size, d_ff, dropout=0.0): super().__init__(embed_size, dropout=dropout) self.edge_attn_query = nn.Linear(embed_size, d_ff) self.edge_attn_key = nn.Linear(embed_size, d_ff) self.attn_denominator = math.sqrt(d_ff) def forward(self, node_fts, rel_edges): """ Args: node_fts: (batch_size, num_nodes, embed_size) rel_edges: (batch_size, num_nodes, num_nodes) """ attn_scores = torch.einsum('bod,bid->boi', self.edge_attn_query( node_fts), self.edge_attn_key(node_fts)) / self.attn_denominator attn_scores = attn_scores.masked_fill(rel_edges == 0, -1e+18) attn_scores = torch.softmax(attn_scores, dim=2) attn_scores = attn_scores.masked_fill(rel_edges == 0, 0) ctx_embeds = self.ctx_layer(torch.bmm(attn_scores, node_fts)) node_embeds = node_fts + self.dropout(ctx_embeds) node_embeds = self.layernorm(node_embeds) return node_embeds def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'embed_size': 4, 'd_ff': 4}]
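A small illustration of the masking above, on a hypothetical 3-node graph: zero entries of rel_edges are pushed to -1e18 before the softmax and zeroed again after it, so a fully isolated node (an all-zero row) receives no context at all; since ctx_layer has no bias, that node's output reduces to layernorm(node_fts):

layer = AttnGCNLayer(embed_size=4, d_ff=4)
node_fts = torch.rand(1, 3, 4)
rel_edges = torch.tensor([[[1.0, 1.0, 0.0],
                           [1.0, 1.0, 0.0],
                           [0.0, 0.0, 0.0]]])   # node 2 is isolated
out = layer(node_fts, rel_edges)                # (1, 3, 4)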
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = -9.999999843067494e+17 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp8 = tmp7 * tmp2 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tmp12 * tmp2 tmp14 = tl.where(tmp11, tmp4, tmp13) tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tmp17 * tmp2 tmp19 = tl.where(tmp16, tmp4, tmp18) tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + x0, tmp20, xmask) tl.store(out_ptr1 + x0, tmp31, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = -9.999999843067494e+17 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp7 = tmp5 - tmp6 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tmp11 = 0.0 tmp12 = tl.where(tmp0, tmp11, tmp10) tl.store(in_out_ptr0 + x2, tmp10, xmask) tl.store(out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex 
= xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_eq_0[grid(64)](primals_6, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_6 buf4 = empty_strided_cuda((4, 4, 1), (4, 
1, 16), torch.float32) buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused__softmax_div_masked_fill_1[grid(16)](buf3, buf2, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = buf2 del buf2 buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_div_masked_fill_2[grid(64)](buf6, buf3, buf4, buf5, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf7, primals_3, out=buf8) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0) del buf7 extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf9) buf10 = buf5 del buf5 buf11 = buf4 del buf4 triton_poi_fused_add_native_layer_norm_3[grid(16)](primals_3, buf9, buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_4[grid(64)](primals_3, buf9, buf10, buf11, primals_8, primals_9, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf10 del buf11 del primals_9 return buf12, primals_3, primals_8, buf3, buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), buf9, primals_7, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) class GCNLayer(nn.Module): def __init__(self, embed_size, dropout=0.0): super().__init__() self.embed_size = embed_size self.ctx_layer = nn.Linear(self.embed_size, self.embed_size, bias=False ) self.layernorm = nn.LayerNorm(embed_size) self.dropout = nn.Dropout(dropout) def forward(self, node_fts, rel_edges): """Args: node_fts: (batch_size, num_nodes, embed_size) rel_edges: (batch_size, num_nodes, num_nodes) """ ctx_embeds = self.ctx_layer(torch.bmm(rel_edges, node_fts)) node_embeds = node_fts + self.dropout(ctx_embeds) node_embeds = self.layernorm(node_embeds) return node_embeds class AttnGCNLayerNew(GCNLayer): def __init__(self, embed_size, d_ff, dropout=0.0): super().__init__(embed_size, dropout=dropout) self.edge_attn_query = nn.Linear(embed_size, d_ff) self.edge_attn_key = nn.Linear(embed_size, d_ff) self.attn_denominator = math.sqrt(d_ff) def forward(self, input_0, input_1): primals_1 = self.edge_attn_query.weight primals_2 = self.edge_attn_query.bias primals_4 = self.edge_attn_key.weight primals_5 = self.edge_attn_key.bias primals_7 = self.ctx_layer.weight primals_8 = self.layernorm.weight primals_9 = self.layernorm.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
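The primals layout in the wrapper follows the role each tensor plays in call(): primals_1/primals_2 feed the first addmm (the query projection), primals_4/primals_5 the second (the key projection), primals_7 the mm that produces ctx_embeds, and primals_8/primals_9 the layer-norm affine. A hedged parity check against the eager layer, assuming both classes are in scope (CUDA only; the guards hard-code batch = num_nodes = embed_size = 4):

ref = AttnGCNLayer(embed_size=4, d_ff=4).cuda()
new = AttnGCNLayerNew(embed_size=4, d_ff=4).cuda()
new.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, device='cuda')
adj = (torch.rand(4, 4, 4, device='cuda') > 0.3).float()
assert torch.allclose(ref(x, adj), new(x, adj), atol=1e-5)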
Roc-Ng/HANet
AttnGCNLayer
false
8,713
[ "MIT" ]
34
e679703e9e725205424d87f750358fb4f62ceec5
https://github.com/Roc-Ng/HANet/tree/e679703e9e725205424d87f750358fb4f62ceec5
ASC
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/yz/cyz6rpze7eebhmqlzv7yxtgdjv5t446mq2vo54cxk73wqitu6oht.py # Topologically Sorted Source Nodes: [mul, exp, mul_1, exp_1, sum_1, div], Original ATen: [aten.mul, aten.exp, aten.sum, aten.div] # Source node to ATen node mapping: # div => div # exp => exp # exp_1 => exp_1 # mul => mul # mul_1 => mul_1 # sum_1 => sum_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 3.5), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 3.5), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1]), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused_div_exp_mul_sum_0 = async_compile.triton('triton_poi_fused_div_exp_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_exp_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_exp_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp4 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask, eviction_policy='evict_last') tmp1 = 3.5 tmp2 = tmp0 * tmp1 tmp3 = tl_math.exp(tmp2) tmp5 = tmp4 * tmp1 tmp6 = tl_math.exp(tmp5) tmp8 = tmp7 * tmp1 tmp9 = tl_math.exp(tmp8) tmp10 = tmp6 + tmp9 tmp12 = tmp11 * tmp1 tmp13 = tl_math.exp(tmp12) tmp14 = tmp10 + tmp13 tmp16 = tmp15 * tmp1 tmp17 = tl_math.exp(tmp16) tmp18 = tmp14 + tmp17 tmp19 = tmp3 / tmp18 tl.store(out_ptr0 + (x3), tmp19, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, exp, mul_1, exp_1, sum_1, div], Original ATen: [aten.mul, aten.exp, aten.sum, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_exp_mul_sum_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.optim import torch.nn as nn import torch.nn.init class ASC(nn.Module): def __init__(self, a=3.5): super().__init__() self.a = a def forward(self, input): return torch.div(torch.exp(self.a * input), torch.sum(torch.exp( self.a * input), dim=1)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
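One subtlety in the forward above: torch.sum(..., dim=1) has no keepdim, so the (B, H, W) sum broadcasts against the (B, C, H, W) numerator as (1, C, H, W). The divisor for element (b, c, h, w) is therefore the channel sum taken at batch index c, and the expression only broadcasts at all because B == C == 4 in get_inputs(); the compiled kernel below reproduces exactly this indexing (x1 is a channel index reused as a batch offset via 64 * x1). For comparison, a sketch of the presumably intended per-channel softmax:

def asc_softmax(x, a=3.5):
    e = torch.exp(a * x)
    return e / e.sum(dim=1, keepdim=True)   # normalize over channels at each (b, h, w)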
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.optim import torch.nn as nn import torch.nn.init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_exp_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp4 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask, eviction_policy= 'evict_last') tmp1 = 3.5 tmp2 = tmp0 * tmp1 tmp3 = tl_math.exp(tmp2) tmp5 = tmp4 * tmp1 tmp6 = tl_math.exp(tmp5) tmp8 = tmp7 * tmp1 tmp9 = tl_math.exp(tmp8) tmp10 = tmp6 + tmp9 tmp12 = tmp11 * tmp1 tmp13 = tl_math.exp(tmp12) tmp14 = tmp10 + tmp13 tmp16 = tmp15 * tmp1 tmp17 = tl_math.exp(tmp16) tmp18 = tmp14 + tmp17 tmp19 = tmp3 / tmp18 tl.store(out_ptr0 + x3, tmp19, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_exp_mul_sum_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class ASCNew(nn.Module): def __init__(self, a=3.5): super().__init__() self.a = a def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
RichardScottOZ/UnDIP
ASC
false
8,714
[ "Apache-2.0" ]
10
8e4a39801142495e785cfbae0744872729fa3fac
https://github.com/RichardScottOZ/UnDIP/tree/8e4a39801142495e785cfbae0744872729fa3fac
RawNTN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/zm/czm6acrrgjryz6xi3wza7npycjuiqsdsygpfdo3lbzaquecrmeuj.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_4, %primals_3], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/qw/cqw33f4xf6xs4gpmtl36somstlfpstoiih2hjqek3ld76xyp5kwr.py # Topologically Sorted Source Nodes: [bilinear, add, tanh], Original ATen: [aten.add, aten.tanh] # Source node to ATen node mapping: # add => add_1 # bilinear => add # tanh => tanh # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_trilinear, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mm), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {}) triton_poi_fused_add_tanh_1 = async_compile.triton('triton_poi_fused_add_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 5 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = libdevice.tanh(tmp4) tl.store(in_out_ptr0 + (x2), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (5, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (5, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (5, 8), (8, 1)) assert_size_stride(primals_6, (1, 5), (5, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten._trilinear] buf0 = 
torch.ops.aten._trilinear.default(primals_4, primals_1, primals_3, [1, 3], [0], [1, 2], [2, 3]) del primals_1 buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_4, primals_3, buf2, 32, grid=grid(32), stream=stream0) buf3 = empty_strided_cuda((4, 5), (5, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (8, 5), (1, 8), 0), out=buf3) del primals_5 buf4 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [bilinear, add, tanh], Original ATen: [aten.add, aten.tanh] triton_poi_fused_add_tanh_1.run(buf4, primals_2, buf3, 20, grid=grid(20), stream=stream0) del buf3 del primals_2 buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (5, 1), (1, 5), 0), out=buf5) return (buf5, primals_4, primals_3, buf2, buf4, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((5, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((5, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((5, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 5), (5, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class RawNTN(nn.Module): def __init__(self, l_dim, r_dim, k=5, non_linear=torch.tanh): super(RawNTN, self).__init__() self.u_R = nn.Linear(k, 1, bias=False) self.f = non_linear self.W = nn.Bilinear(l_dim, r_dim, k, bias=True) self.V = nn.Linear(l_dim + r_dim, k, bias=False) def forward(self, e, q): """ e: tensor of size (batch_size, l_dim) q: tensor of size (batch_size, r_dim) return: tensor of size (batch_size, 1) """ return self.u_R(self.f(self.W(e, q) + self.V(torch.cat((e, q), 1)))) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'l_dim': 4, 'r_dim': 4}]
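A minimal usage sketch of the tensor network above (hypothetical sizes; one scalar score per (e, q) pair):

ntn = RawNTN(l_dim=4, r_dim=4, k=5)
e = torch.rand(4, 4)   # left embeddings, (batch_size, l_dim)
q = torch.rand(4, 4)   # right embeddings, (batch_size, r_dim)
scores = ntn(e, q)     # (batch_size, 1)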
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 5 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = libdevice.tanh(tmp4) tl.store(in_out_ptr0 + x2, tmp5, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (5, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (5,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (5, 8), (8, 1)) assert_size_stride(primals_6, (1, 5), (5, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten._trilinear.default(primals_4, primals_1, primals_3, [1, 3], [0], [1, 2], [2, 3]) del primals_1 buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_4, primals_3, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 5), (5, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (8, 5), (1, 8 ), 0), out=buf3) del primals_5 buf4 = buf1 del buf1 triton_poi_fused_add_tanh_1[grid(20)](buf4, primals_2, buf3, 20, XBLOCK=32, num_warps=1, num_stages=1) del buf3 del primals_2 buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (5, 1), (1, 5 ), 0), out=buf5) return buf5, primals_4, primals_3, buf2, buf4, primals_6 class RawNTNNew(nn.Module): def __init__(self, l_dim, r_dim, k=5, non_linear=torch.tanh): super(RawNTNNew, self).__init__() self.u_R = nn.Linear(k, 1, bias=False) self.f = non_linear self.W = nn.Bilinear(l_dim, r_dim, k, bias=True) self.V = nn.Linear(l_dim + r_dim, k, bias=False) def forward(self, input_0, input_1): primals_6 = self.u_R.weight primals_1 = self.W.weight primals_2 = self.W.bias primals_5 = self.V.weight primals_4 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
QingkaiZeng/GenTaxo
RawNTN
false
8715
[ "MIT" ]
28
10257a1714d14c6a4c49cbfa0b507408f718cdf0
https://github.com/QingkaiZeng/GenTaxo/tree/10257a1714d14c6a4c49cbfa0b507408f718cdf0
RawArborist
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/qr/cqrfkrcvd6hyaruvszficx5gjtw5rztvgnytksay2pmrngfngkah.py # Topologically Sorted Source Nodes: [mul, sum_1], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # mul => mul # sum_1 => sum_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %view_4), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1], True), kwargs = {}) triton_poi_fused_mul_sum_0 = async_compile.triton('triton_poi_fused_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (5*x0), xmask, 
eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (5*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (5*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (5*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (5*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (5*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (5*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (5*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (4 + (5*x0)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (4 + (5*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tl.store(out_ptr0 + (x0), tmp18, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (5, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (5, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 5), (5, 1), torch.float32) # Topologically Sorted Source Nodes: [u], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 5), (1, 4), 0), out=buf0) del primals_1 # Topologically Sorted Source Nodes: [w], Original ATen: [aten._trilinear] buf1 = torch.ops.aten._trilinear.default(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_3, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_3 buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, sum_1], Original ATen: [aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_mul_sum_0.run(buf0, buf2, buf3, 64, grid=grid(64), stream=stream0) return (buf3, primals_2, buf0, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((5, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((5, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class RawArborist(nn.Module): def __init__(self, l_dim, r_dim, k=5): super(RawArborist, self).__init__() self.u = nn.Linear(l_dim, k, bias=False) self.W = nn.Bilinear(l_dim, r_dim, k, bias=False) def forward(self, e, q): u = self.u(e) w = self.W(e, q) return torch.sum(u * w, dim=-1, keepdim=True) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'l_dim': 4, 'r_dim': 4}]
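A minimal sketch of the Arborist scorer above (illustrative, not from the source repo): the score is the dot product of the k-dimensional projections u(e) and W(e, q), so broadcasting over the leading (4, 4, 4) dimensions yields one scalar per position.

import torch

arborist = RawArborist(l_dim=4, r_dim=4, k=5)
e, q = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
score = arborist(e, q)
assert score.shape == (4, 4, 4, 1)
# Same value, written out as the explicit dot product over k:
ref = (arborist.u(e) * arborist.W(e, q)).sum(dim=-1, keepdim=True)
assert torch.allclose(score, ref)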
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 5 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 5 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 5 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 5 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 5 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 5 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 5 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (4 + 5 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (4 + 5 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tl.store(out_ptr0 + x0, tmp18, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (5, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (5, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 5), (5, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 5), (1, 4), 0), out=buf0) del primals_1 buf1 = torch.ops.aten._trilinear.default(reinterpret_tensor( primals_2, (64, 4), (4, 1), 0), primals_3, reinterpret_tensor( primals_4, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_3 buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sum_0[grid(64)](buf0, buf2, buf3, 64, XBLOCK= 64, num_warps=1, num_stages=1) return buf3, primals_2, buf0, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), buf2 class RawArboristNew(nn.Module): def __init__(self, l_dim, r_dim, k=5): super(RawArboristNew, self).__init__() self.u = nn.Linear(l_dim, k, bias=False) self.W = nn.Bilinear(l_dim, r_dim, k, bias=False) def forward(self, input_0, input_1): primals_1 = self.u.weight primals_3 = self.W.weight primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
QingkaiZeng/GenTaxo
RawArborist
false
8716
[ "MIT" ]
28
10257a1714d14c6a4c49cbfa0b507408f718cdf0
https://github.com/QingkaiZeng/GenTaxo/tree/10257a1714d14c6a4c49cbfa0b507408f718cdf0
SLP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/5s/c5s7q2ybzwodeuorig3awcwebk53edcn54ogeiiskva3k6c4iqxd.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2, %primals_3], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = (xindex // 12) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) 
+ x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/md/cmdzxuitxxutljkpd3ejipousqkvfcnxstvzuzedu4k7anvsnnba.py # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] # Source node to ATen node mapping: # tanh => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%mm,), kwargs = {}) triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tl.store(in_out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, primals_3, buf0, 48, grid=grid(48), stream=stream0) del primals_1 del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: 
[linear], Original ATen: [aten.mm] extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), out=buf1) del primals_4 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf2, 16, grid=grid(16), stream=stream0) buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf3) return (buf3, buf0, buf2, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class SLP(nn.Module):

    def __init__(self, l_dim, r_dim, hidden_dim, non_linear=F.tanh):
        super(SLP, self).__init__()
        self.u_R = nn.Linear(hidden_dim, 1, bias=False)
        self.f = non_linear
        self.ffn = nn.Linear(l_dim * 2 + r_dim, hidden_dim, bias=False)

    def forward(self, e1, e2, q):
        """
        e1: tensor of size (*, l_dim)
        e2: tensor of size (*, l_dim)
        q: tensor of size (*, r_dim)
        return: tensor of size (*, 1)
        """
        return self.u_R(self.f(self.ffn(torch.cat((e1, e2, q), 1))))


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'l_dim': 4, 'r_dim': 4, 'hidden_dim': 4}]
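A minimal usage sketch for SLP (an illustration assuming only the class above): the three embeddings are concatenated to width l_dim * 2 + r_dim, squashed through tanh, and reduced to a single score.

import torch

slp = SLP(l_dim=4, r_dim=4, hidden_dim=4)
e1, e2, q = (torch.rand(4, 4) for _ in range(3))
score = slp(e1, e2, q)  # u_R(tanh(ffn(cat(e1, e2, q))))
assert score.shape == (4, 1)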
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(in_out_ptr0 + x0, tmp1, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(48)](primals_1, primals_2, primals_3, buf0, 48, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), out=buf1) del primals_4 buf2 = buf1 del buf1 triton_poi_fused_tanh_1[grid(16)](buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 1), (1, 4 ), 0), out=buf3) return buf3, buf0, buf2, primals_5 class SLPNew(nn.Module): def __init__(self, l_dim, r_dim, hidden_dim, non_linear=F.tanh): super(SLPNew, self).__init__() self.u_R = nn.Linear(hidden_dim, 1, bias=False) self.f = non_linear self.ffn = nn.Linear(l_dim * 2 + r_dim, hidden_dim, bias=False) def forward(self, input_0, input_1, input_2): primals_5 = self.u_R.weight primals_4 = self.ffn.weight primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
QingkaiZeng/GenTaxo
SLP
false
8717
[ "MIT" ]
28
10257a1714d14c6a4c49cbfa0b507408f718cdf0
https://github.com/QingkaiZeng/GenTaxo/tree/10257a1714d14c6a4c49cbfa0b507408f718cdf0
LBM
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/zn/czn6cztle4peyy4pa7mkag53s34sjkn6wpenptu6ttfuvhgzzrup.py # Topologically Sorted Source Nodes: [e], Original ATen: [aten.cat] # Source node to ATen node mapping: # e => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], -1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/6c/c6clp4rznkerpyt4ywcadsgrzcogvlzpkfzlhxq5x2zvo4rz6j4e.py # Topologically Sorted Source Nodes: [exp], Original ATen: [aten.exp] # Source node to ATen node mapping: # exp => exp # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%view_2,), kwargs = {}) triton_poi_fused_exp_1 = async_compile.triton('triton_poi_fused_exp_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_exp_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl_math.exp(tmp0) tl.store(in_out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 8, 4), (32, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [e], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0) del primals_1 del primals_2 # Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten._trilinear] buf1 = torch.ops.aten._trilinear.default(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), primals_3, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_3 buf2 = buf1 del buf1 buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf2 # reuse # 
Topologically Sorted Source Nodes: [exp], Original ATen: [aten.exp] triton_poi_fused_exp_1.run(buf3, 64, grid=grid(64), stream=stream0) return (buf3, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 8, 4), (32, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class LBM(nn.Module):

    def __init__(self, l_dim, r_dim):
        super(LBM, self).__init__()
        self.W = nn.Bilinear(l_dim * 2, r_dim, 1, bias=False)

    def forward(self, e1, e2, q):
        """
        e1: tensor of size (*, l_dim)
        e2: tensor of size (*, l_dim)
        q: tensor of size (*, r_dim)
        return: tensor of size (*, 1)
        """
        e = torch.cat((e1, e2), -1)
        return torch.exp(self.W(e, q))


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'l_dim': 4, 'r_dim': 4}]
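A minimal sketch for LBM (illustrative only, assuming the class above): because the bilinear logit is exponentiated, every returned score is strictly positive.

import torch

lbm = LBM(l_dim=4, r_dim=4)
e1, e2, q = (torch.rand(4, 4, 4, 4) for _ in range(3))
score = lbm(e1, e2, q)
assert score.shape == (4, 4, 4, 1)
assert (score > 0).all()  # exp() keeps the scores positive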
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_exp_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl_math.exp(tmp0) tl.store(in_out_ptr0 + x0, tmp1, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 8, 4), (32, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = torch.ops.aten._trilinear.default(reinterpret_tensor(buf0, ( 64, 8), (8, 1), 0), primals_3, reinterpret_tensor(primals_4, ( 64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_3 buf2 = buf1 del buf1 buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf2 triton_poi_fused_exp_1[grid(64)](buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf3, reinterpret_tensor(buf0, (64, 8), (8, 1), 0 ), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), buf3 class LBMNew(nn.Module): def __init__(self, l_dim, r_dim): super(LBMNew, self).__init__() self.W = nn.Bilinear(l_dim * 2, r_dim, 1, bias=False) def forward(self, input_0, input_1, input_2): primals_3 = self.W.weight primals_1 = input_0 primals_2 = input_1 primals_4 = input_2 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
QingkaiZeng/GenTaxo
LBM
false
8718
[ "MIT" ]
28
10257a1714d14c6a4c49cbfa0b507408f718cdf0
https://github.com/QingkaiZeng/GenTaxo/tree/10257a1714d14c6a4c49cbfa0b507408f718cdf0
Attention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/dh/cdhwwkkwwalf3fekd34n4w7urr47fv5kpqakc24ioadnboe5irlo.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 
= yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/wa/cwafcd4jm52aavsle3afevcrj4xk6lqjnrsj5w7fxw6jq7upf34w.py # Topologically Sorted Source Nodes: [kv], Original ATen: [aten.clone] # Source node to ATen node mapping: # kv => clone # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 32 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x3 = xindex y0 = yindex % 4 y1 = (yindex // 4) % 4 y2 = (yindex // 16) y4 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*y2) + (8*x3) + (32*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3 + (4*y4)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/k6/ck67qremsmvlkkfvvruz4led6liru3etuwkbpb4a5ulluramr4bu.py # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_1 => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_8, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = 
call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/4d/c4dndrlfjcamjfnn3ng5agjc3ahefdgw6jcsnn6hm4ljwpbfbe7h.py # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 
'*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/vt/cvteqb7nstewoucfq5ik6nusssetiyrkhlit4vdnfn7pmp34uzyh.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.add] # Source node to ATen node mapping: # x_1 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_14, %primals_6), kwargs = {}) triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (8, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 8), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, buf2, 16, 4, grid=grid(16, 4), stream=stream0) buf3 = empty_strided_cuda((2, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [kv], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf1, buf3, 32, 4, grid=grid(32, 4), stream=stream0) del buf1 buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 0, 1), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf5, buf6, 256, grid=grid(256), stream=stream0) del buf5 buf7 = reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 64), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf7, buf8, 16, 4, grid=grid(16, 4), stream=stream0) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf9) buf10 = reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.add] 
triton_poi_fused_add_4.run(buf10, primals_6, 64, grid=grid(64), stream=stream0) del primals_6 return (buf10, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), primals_5, reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 64), reinterpret_tensor(buf2, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch import nn import torch.utils.data.dataloader import torch.utils.data import torch.onnx import torch.backends.cudnn class Attention(nn.Module): def __init__(self, dim_q, dim_kv, num_heads=4, qkv_bias=False, stride=1): super().__init__() self.dim = dim_q self.num_heads = num_heads head_dim = dim_q // num_heads self.scale = head_dim ** -0.5 self.q = nn.Linear(dim_q, dim_q, bias=qkv_bias) self.kv = nn.Linear(dim_kv, dim_q * 2, bias=qkv_bias) self.proj = nn.Linear(dim_q, dim_q) self.stride = stride if stride > 1: self.pool = nn.AvgPool2d(stride, stride=stride) self.sr = nn.Conv2d(dim_kv, dim_kv, kernel_size=1, stride=1) self.norm = nn.LayerNorm(dim_kv) self.act = nn.GELU() def forward(self, x, y): B, N, C = x.shape B, L, C2 = y.shape H = W = int(math.sqrt(L)) q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads ).permute(0, 2, 1, 3) if self.stride > 1: y_ = y.permute(0, 2, 1).contiguous().reshape(B, C2, H, W) y_ = self.sr(self.pool(y_)).reshape(B, C2, -1).permute(0, 2, 1) y_ = self.norm(y_) y_ = self.act(y_) kv = self.kv(y_).reshape(B, -1, 2, self.num_heads, C // self. num_heads).permute(2, 0, 3, 1, 4).contiguous() else: kv = self.kv(y).reshape(B, -1, 2, self.num_heads, C // self. num_heads).permute(2, 0, 3, 1, 4).contiguous() k, v = kv[0], kv[1] attn = q @ k.transpose(-2, -1) * self.scale attn = attn.softmax(dim=-1) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) return x def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dim_q': 4, 'dim_kv': 4}]
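A minimal shape check for the cross-attention module above (an illustrative sketch; with the default stride=1 the pooling/sr branch is skipped, so y only needs the layout (B, L, C2)).

import torch

attn = Attention(dim_q=4, dim_kv=4)  # num_heads=4 -> head_dim=1
x = torch.rand(4, 4, 4)              # queries, (B, N, C)
y = torch.rand(4, 4, 4)              # keys/values, (B, L, C2)
out = attn(x, y)
assert out.shape == x.shape          # attention preserves (B, N, C)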
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn import torch.utils.data.dataloader import torch.utils.data import torch.onnx import torch.backends.cudnn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 32 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x3 = xindex y0 = yindex % 4 y1 = yindex // 4 % 4 y2 = yindex // 16 y4 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * y2 + 8 * x3 + 32 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3 + 4 * y4), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): 
xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (8, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 8), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, buf2, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((2, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) triton_poi_fused_clone_1[grid(32, 4)](buf1, buf3, 32, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del buf1 buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 0, 1), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused__softmax_3[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 buf7 = reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 1), 0) del buf0 extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 64), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_0[grid(16, 4)](buf7, buf8, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0) del buf7 extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf9) buf10 = reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0) del buf9 triton_poi_fused_add_4[grid(64)](buf10, primals_6, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_6 return buf10, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0 ), primals_5, reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 64 ), reinterpret_tensor(buf2, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0) class AttentionNew(nn.Module): def __init__(self, dim_q, dim_kv, num_heads=4, qkv_bias=False, stride=1): super().__init__() self.dim = dim_q self.num_heads = num_heads head_dim = dim_q // num_heads self.scale = head_dim ** -0.5 self.q = nn.Linear(dim_q, dim_q, bias=qkv_bias) self.kv = nn.Linear(dim_kv, dim_q * 2, bias=qkv_bias) self.proj = nn.Linear(dim_q, dim_q) self.stride = stride if stride > 1: 
self.pool = nn.AvgPool2d(stride, stride=stride) self.sr = nn.Conv2d(dim_kv, dim_kv, kernel_size=1, stride=1) self.norm = nn.LayerNorm(dim_kv) self.act = nn.GELU() def forward(self, input_0, input_1): primals_3 = self.q.weight primals_4 = self.kv.weight primals_5 = self.proj.weight primals_6 = self.proj.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
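A reasonable sanity check for the Inductor output above is to run the eager Attention and the compiled AttentionNew side by side on identical weights. This is a sketch under two assumptions: a CUDA device is present (call() pins device 0), and both classes are in scope; since AttentionNew holds the same q/kv/proj submodules, load_state_dict transfers cleanly.

# Sketch: eager vs. compiled parity check (CUDA required; call() uses device 0).
import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    eager = Attention(dim_q=4, dim_kv=4).cuda()
    compiled = AttentionNew(dim_q=4, dim_kv=4).cuda()
    compiled.load_state_dict(eager.state_dict())   # share the same weights
    x = torch.rand(4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, device='cuda')
    assert torch.allclose(eager(x, y), compiled(x, y), atol=1e-5)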
RISC-NYUAD/SiamTPNTracker
Attention
false
8,719
[ "MIT" ]
12
cbff7373941cb30d4a970cac1ee29706d422c212
https://github.com/RISC-NYUAD/SiamTPNTracker/tree/cbff7373941cb30d4a970cac1ee29706d422c212
MSELossWithSigmoid
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/oq/coq63mkiudl4wsoahk3bo2bn7hozitifrrjynwh3joqiypxsuajz.py # Topologically Sorted Source Nodes: [sigmoid, mse_loss], Original ATen: [aten.sigmoid, aten.mse_loss] # Source node to ATen node mapping: # mse_loss => mean, pow_1, sub # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sigmoid, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) triton_per_fused_mse_loss_sigmoid_0 = async_compile.triton('triton_per_fused_mse_loss_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mse_loss_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp2 = tl.load(in_ptr1 + (r0), None) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 - tmp2 tmp4 = tmp3 * tmp3 tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = 256.0 tmp9 = tmp7 / tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sigmoid, mse_loss], Original ATen: [aten.sigmoid, aten.mse_loss] stream0 = get_raw_stream(0) triton_per_fused_mse_loss_sigmoid_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
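The generated kernel above is a single-program persistent reduction: one Triton program loads all 256 elements, fuses the sigmoid and squared difference, and reduces to a scalar mean in registers. Below is a hand-written sketch of the same idea; it is my simplification, not Inductor output, and it assumes a power-of-two element count and a CUDA device.

# Hand-written sketch of the fused sigmoid + MSE persistent reduction.
import torch
import triton
import triton.language as tl

@triton.jit
def mse_sigmoid_kernel(x_ptr, y_ptr, out_ptr, N: tl.constexpr):
    offs = tl.arange(0, N)            # one program covers all N elements
    x = tl.load(x_ptr + offs)
    y = tl.load(y_ptr + offs)
    d = tl.sigmoid(x) - y             # fuse the sigmoid into the subtraction
    tl.store(out_ptr, tl.sum(d * d, 0) / N)   # scalar mean

x = torch.rand(256, device='cuda')
y = torch.rand(256, device='cuda')
out = torch.empty((), device='cuda')
mse_sigmoid_kernel[(1,)](x, y, out, N=256)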
import torch class MSELossWithSigmoid(torch.nn.Module): def __init__(self): super().__init__() self.mse = torch.nn.MSELoss() self.sigmoid = torch.nn.Sigmoid() self.loss = lambda x, y: self.mse(self.sigmoid(x), y) def forward(self, source, target): return self.loss(source, target) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
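The module is, by construction, identical to composing the two public ops by hand, which makes a one-line CPU check possible:

# Sketch: MSELossWithSigmoid(x, y) == mean((sigmoid(x) - y) ** 2).
import torch

loss_mod = MSELossWithSigmoid()
x, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
assert torch.allclose(loss_mod(x, y), ((torch.sigmoid(x) - y) ** 2).mean())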
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mse_loss_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr1 + r0, None) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 - tmp2 tmp4 = tmp3 * tmp3 tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = 256.0 tmp9 = tmp7 / tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mse_loss_sigmoid_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class MSELossWithSigmoidNew(torch.nn.Module): def __init__(self): super().__init__() self.mse = torch.nn.MSELoss() self.sigmoid = torch.nn.Sigmoid() self.loss = lambda x, y: self.mse(self.sigmoid(x), y) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Roulbac/GanSeg
MSELossWithSigmoid
false
8,720
[ "MIT" ]
20
78f354da5d724b93ead3ac6c2b15ae18d3ac0aea
https://github.com/Roulbac/GanSeg/tree/78f354da5d724b93ead3ac6c2b15ae18d3ac0aea
NTN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/ba/cbae2iys3sel45mbtg23doquxwo2k42je62bke7nsudzuhvxp4ug.py # Topologically Sorted Source Nodes: [e, bilinear], Original ATen: [aten.cat, aten.view] # Source node to ATen node mapping: # bilinear => view # e => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], -1), kwargs = {}) # %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%cat, [-1, 8]), kwargs = {}) triton_poi_fused_cat_view_0 = async_compile.triton('triton_poi_fused_cat_view_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_view_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_view_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 
tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/yv/cyvadj3jht4qx6quemy3wzaqqh6liuigeuodjr7n4dt43za3zsuu.py # Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat_1 => cat_1 # Graph fragment: # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%cat, %primals_4], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = (xindex // 12) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 8, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 4, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tmp6 & tmp4 tmp8 = tl.load(in_ptr0 + ((4*x1) + x0), tmp7 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp0 >= tmp5 tmp10 = tmp9 & tmp4 tmp11 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.where(tmp6, tmp8, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tmp16 = tl.full([1], 12, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tl.where(tmp4, tmp14, tmp18) tl.store(out_ptr0 + (x2), tmp19, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/os/cosoqgm7ml6kgask6w2cicc2ddi3etfex22z3bdxvajjaa2zw227.py # Topologically 
Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # tanh => tanh # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_trilinear, %mm), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) triton_poi_fused_add_tanh_2 = async_compile.triton('triton_poi_fused_add_tanh_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (5, 8, 4), (32, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (5, 12), (12, 1)) assert_size_stride(primals_6, (1, 5), (5, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [e, bilinear], Original ATen: [aten.cat, aten.view] stream0 = get_raw_stream(0) triton_poi_fused_cat_view_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0) # Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten._trilinear] buf1 = torch.ops.aten._trilinear.default(buf0, primals_3, primals_4, [1, 3], [0], [1, 2], [2, 3]) del primals_3 buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(primals_1, primals_2, primals_4, buf3, 48, grid=grid(48), stream=stream0) del primals_1 del primals_2 buf4 = empty_strided_cuda((4, 5), (5, 1), 
torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(buf3, reinterpret_tensor(primals_5, (12, 5), (1, 12), 0), out=buf4) del primals_5 buf5 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh] triton_poi_fused_add_tanh_2.run(buf5, buf4, 20, grid=grid(20), stream=stream0) del buf4 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (5, 1), (1, 5), 0), out=buf6) return (buf6, buf0, primals_4, buf3, buf5, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((5, 8, 4), (32, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((5, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 5), (5, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class NTN(nn.Module):

    def __init__(self, l_dim, r_dim, k=5, non_linear=F.tanh):
        super(NTN, self).__init__()
        self.u_R = nn.Linear(k, 1, bias=False)
        self.f = non_linear
        self.W = nn.Bilinear(l_dim * 2, r_dim, k, bias=False)
        self.V = nn.Linear(l_dim * 2 + r_dim, k, bias=False)

    def forward(self, e1, e2, q):
        """
        e1: tensor of size (*, l_dim)
        e2: tensor of size (*, r_dim)
        q: tensor of size (*, r_dim)
        return: tensor of size (*, 1)

        Note: torch.cat((e1, e2), -1) has l_dim + r_dim features, while
        self.W and self.V expect l_dim * 2 in that slot, so the module
        implicitly assumes r_dim == l_dim (true for the test shapes below).
        """
        e = torch.cat((e1, e2), -1)
        return self.u_R(self.f(self.W(e, q) + self.V(torch.cat((e, q), 1))))


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'l_dim': 4, 'r_dim': 4}]
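A short usage sketch for NTN with the declared test shapes; it checks only that one scalar score per row comes out.

# Sketch: NTN scores an (e1, e2) pair against a query q via k=5 bilinear slices.
import torch

ntn = NTN(l_dim=4, r_dim=4)
e1, e2, q = torch.rand(4, 4), torch.rand(4, 4), torch.rand(4, 4)
score = ntn(e1, e2, q)
assert score.shape == (4, 1)   # one scalar score per row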
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_view_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 8, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 4, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tmp6 & tmp4 tmp8 = tl.load(in_ptr0 + (4 * x1 + x0), tmp7 & xmask, eviction_policy= 'evict_last', other=0.0) tmp9 = tmp0 >= tmp5 tmp10 = tmp9 & tmp4 tmp11 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.where(tmp6, tmp8, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tl.full([1], 12, tl.int64) tmp18 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tl.where(tmp4, tmp14, tmp18) tl.store(out_ptr0 + x2, tmp19, xmask) @triton.jit def triton_poi_fused_add_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (5, 8, 4), (32, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (5, 12), (12, 1)) assert_size_stride(primals_6, (1, 5), (5, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_view_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) buf1 = torch.ops.aten._trilinear.default(buf0, primals_3, primals_4, [1, 3], [0], [1, 2], [2, 3]) del primals_3 buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 12), (12, 1), torch.float32) triton_poi_fused_cat_1[grid(48)](primals_1, primals_2, primals_4, buf3, 48, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 buf4 = empty_strided_cuda((4, 5), (5, 1), torch.float32) extern_kernels.mm(buf3, reinterpret_tensor(primals_5, (12, 5), (1, 12), 0), out=buf4) del primals_5 buf5 = buf2 del buf2 triton_poi_fused_add_tanh_2[grid(20)](buf5, buf4, 20, XBLOCK=32, num_warps=1, num_stages=1) del buf4 buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (5, 1), (1, 5 ), 0), out=buf6) return buf6, buf0, primals_4, buf3, buf5, primals_6 class NTNNew(nn.Module): def __init__(self, l_dim, r_dim, k=5, non_linear=F.tanh): super(NTNNew, self).__init__() self.u_R = nn.Linear(k, 1, bias=False) self.f = non_linear self.W = nn.Bilinear(l_dim * 2, r_dim, k, bias=False) self.V = nn.Linear(l_dim * 2 + r_dim, k, bias=False) def forward(self, input_0, input_1, input_2): primals_6 = self.u_R.weight primals_3 = self.W.weight primals_5 = self.V.weight primals_1 = input_0 primals_2 = input_1 primals_4 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
QingkaiZeng/GenTaxo
NTN
false
8,721
[ "MIT" ]
28
10257a1714d14c6a4c49cbfa0b507408f718cdf0
https://github.com/QingkaiZeng/GenTaxo/tree/10257a1714d14c6a4c49cbfa0b507408f718cdf0
Arborist
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/zn/czn6cztle4peyy4pa7mkag53s34sjkn6wpenptu6ttfuvhgzzrup.py # Topologically Sorted Source Nodes: [e], Original ATen: [aten.cat] # Source node to ATen node mapping: # e => cat # Graph fragment: # %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], -1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/cj/ccj5mlv2cdaffjg36albvpfaxa4jgym7nam43szxakf5ymtu7d5g.py # Topologically Sorted Source Nodes: [mul, sum_1], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # mul => mul # sum_1 => sum_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %view_4), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1], True), kwargs = {}) triton_poi_fused_mul_sum_1 = async_compile.triton('triton_poi_fused_mul_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (5*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (5*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (5*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (5*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (5*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (5*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (5*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (5*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (4 + (5*x0)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (4 + (5*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tl.store(out_ptr0 + (x0), tmp18, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (5, 8), (8, 1)) assert_size_stride(primals_4, (5, 8, 4), (32, 4, 1)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [e], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 5), (5, 1), torch.float32) # Topologically Sorted Source Nodes: [u], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 5), (1, 8), 0), out=buf1) del primals_3 # Topologically Sorted Source Nodes: [w], Original ATen: [aten._trilinear] buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), primals_4, reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_4 buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, sum_1], Original ATen: [aten.mul, aten.sum] triton_poi_fused_mul_sum_1.run(buf1, buf3, buf4, 64, grid=grid(64), stream=stream0) return (buf4, buf0, buf1, reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((5, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((5, 8, 4), (32, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Arborist(nn.Module):

    def __init__(self, l_dim, r_dim, k=5):
        super(Arborist, self).__init__()
        self.u = nn.Linear(l_dim * 2, k, bias=False)
        self.W = nn.Bilinear(l_dim * 2, r_dim, k, bias=False)

    def forward(self, e1, e2, q):
        """
        e1: tensor of size (*, l_dim)
        e2: tensor of size (*, r_dim)
        q: tensor of size (*, r_dim)
        return: tensor of size (*, 1)

        Note: as in NTN above, torch.cat((e1, e2), -1) has l_dim + r_dim
        features while self.u and self.W expect l_dim * 2, so r_dim == l_dim
        is implicitly assumed.
        """
        e = torch.cat((e1, e2), -1)
        u = self.u(e)
        w = self.W(e, q)
        return torch.sum(u * w, dim=-1, keepdim=True)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'l_dim': 4, 'r_dim': 4}]
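The analogous sketch for Arborist, which swaps NTN's tanh composition for a plain inner product of the two k-dimensional projections; nn.Bilinear broadcasts over leading batch dims, so the 4-D test shapes work directly.

# Sketch: Arborist's score is <u(e), W(e, q)> per position, with keepdim=True.
import torch

arb = Arborist(l_dim=4, r_dim=4)
e1, e2, q = (torch.rand(4, 4, 4, 4) for _ in range(3))
score = arb(e1, e2, q)
assert score.shape == (4, 4, 4, 1)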
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 5 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 5 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 5 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 5 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 5 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 5 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 5 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (4 + 5 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (4 + 5 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tl.store(out_ptr0 + x0, tmp18, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (5, 8), (8, 1)) assert_size_stride(primals_4, (5, 8, 4), (32, 4, 1)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 5), (5, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 5), (1, 8), 0), out=buf1) del primals_3 buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor(buf0, ( 64, 8), (8, 1), 0), primals_4, reinterpret_tensor(primals_5, ( 64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_4 buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_mul_sum_1[grid(64)](buf1, buf3, buf4, 64, XBLOCK= 64, num_warps=1, 
num_stages=1) return buf4, buf0, buf1, reinterpret_tensor(primals_5, (64, 4), (4, 1), 0 ), buf3 class ArboristNew(nn.Module): def __init__(self, l_dim, r_dim, k=5): super(ArboristNew, self).__init__() self.u = nn.Linear(l_dim * 2, k, bias=False) self.W = nn.Bilinear(l_dim * 2, r_dim, k, bias=False) def forward(self, input_0, input_1, input_2): primals_3 = self.u.weight primals_4 = self.W.weight primals_1 = input_0 primals_2 = input_1 primals_5 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
QingkaiZeng/GenTaxo
Arborist
false
8,722
[ "MIT" ]
28
10257a1714d14c6a4c49cbfa0b507408f718cdf0
https://github.com/QingkaiZeng/GenTaxo/tree/10257a1714d14c6a4c49cbfa0b507408f718cdf0
CosineAttention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/iy/ciygegsg63hmgl2jngfyuogbbm2zkeyan6kfpvyu2avi4djodpwx.py # Topologically Sorted Source Nodes: [norm, add, q_norm], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] # Source node to ATen node mapping: # add => add # norm => pow_1, pow_2, sum_1 # q_norm => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, 1e-10), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %add), kwargs = {}) triton_poi_fused_add_div_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_add_div_linalg_vector_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-10 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/a5/ca56rgpdjbilaspdaau44lnrilsxekhmcnbwzhtrcgfbilkik27p.py # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + 
(2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/xl/cxls6dl5dz3ua4ilno7rjcfd6m7p4ydnd3mzfaq2cepnph6e2y7h.py # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_1 => div_2, sum_3 # Graph fragment: # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_3), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [norm, add, q_norm], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] stream0 = 
get_raw_stream(0) triton_poi_fused_add_div_linalg_vector_norm_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [norm_1, add_1, k_norm], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div] triton_poi_fused_add_div_linalg_vector_norm_0.run(arg1_1, buf1, 64, grid=grid(64), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [norm, add, q_norm, attention], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div, aten.bmm] extern_kernels.bmm(buf0, reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf2) del buf0 buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(buf4, arg2_1, out=buf5) del arg2_1 return (buf5, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class BaseAttention(nn.Module): def __init__(self): super().__init__() def forward(self, *args, **kwargs): raise NotImplementedError class CosineAttention(BaseAttention): """Cosine Attention""" def __init__(self, dropout_rate=0.0, eps=1e-10, **kwargs): super().__init__() self.dropout = nn.Dropout(dropout_rate) self.eps = eps def forward(self, q, k, v, attn_mask=None): """Forward Args: q (torch.Tensor): Query matrix, (B, T_q, D_q) k (torch.Tensor): Key matrix, (B, T_k, D_k) v (torch.Tensor): Value matrix, (B, T_v, D_v) T_v = T_k, D_v = D_k attn_mask (torch.BoolTensor | None): Mask tensor. Elements where the mask is True are masked out. Returns: output (B, T_q, D_v); attention (B, T_q, T_k) Notes: Cosine attention requires D_q = D_k, so I denote it as D here """ q_norm = q / (q.norm(p=2, dim=-1, keepdim=True) + self.eps) k_norm = k / (k.norm(p=2, dim=-1, keepdim=True) + self.eps) attention = torch.bmm(q_norm, k_norm.permute(0, 2, 1)) if attn_mask is not None: attention.masked_fill_(attn_mask, -np.inf) attention = F.softmax(attention, dim=-1) attention = self.dropout(attention) output = attention.bmm(v) return output, attention def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {}]
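A minimal usage sketch for the eager CosineAttention module above; the tensor names and the all-False mask are illustrative, not part of the source.

attn = CosineAttention(dropout_rate=0.0)
q = torch.rand(4, 4, 4)                        # (B, T_q, D)
k = torch.rand(4, 4, 4)                        # (B, T_k, D)
v = torch.rand(4, 4, 4)                        # (B, T_k, D_v)
mask = torch.zeros(4, 4, 4, dtype=torch.bool)  # True entries would be masked out
out, attention = attn(q, k, v, attn_mask=mask)
assert out.shape == (4, 4, 4)                  # (B, T_q, D_v)
assert attention.shape == (4, 4, 4)            # (B, T_q, T_k)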
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-10 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_linalg_vector_norm_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_linalg_vector_norm_0[grid(64)](arg1_1, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del 
arg1_1 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf0, reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf2) del buf0 buf3 = buf1 del buf1 triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = buf2 del buf2 triton_poi_fused__softmax_2[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = buf3 del buf3 extern_kernels.bmm(buf4, arg2_1, out=buf5) del arg2_1 return buf5, buf4 class BaseAttention(nn.Module): def __init__(self): super().__init__() def forward(self, *args, **kwargs): raise NotImplementedError class CosineAttentionNew(BaseAttention): """Cosine Attention""" def __init__(self, dropout_rate=0.0, eps=1e-10, **kwargs): super().__init__() self.dropout = nn.Dropout(dropout_rate) self.eps = eps def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
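A minimal equivalence sketch, assuming a CUDA device and that both CosineAttention (eager) and CosineAttentionNew (compiled) from this record are in scope; with dropout_rate=0.0 the two paths should agree up to floating-point tolerance.

if torch.cuda.is_available():
    q = torch.rand(4, 4, 4, device='cuda')  # contiguous, matching the stride asserts in call()
    k = torch.rand(4, 4, 4, device='cuda')
    v = torch.rand(4, 4, 4, device='cuda')
    ref_out, ref_attn = CosineAttention()(q, k, v)
    new_out, new_attn = CosineAttentionNew()(q, k, v)
    assert torch.allclose(ref_out, new_out, atol=1e-5)
    assert torch.allclose(ref_attn, new_attn, atol=1e-5)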
ROBINADC/BiGRU-CRF-with-Attention-for-NER
CosineAttention
false
8,723
[ "MIT" ]
27
b9e037ebd6e1d56500ffb60c6030013982c17ded
https://github.com/ROBINADC/BiGRU-CRF-with-Attention-for-NER/tree/b9e037ebd6e1d56500ffb60c6030013982c17ded
TriNTN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/a5/ca5be7cy34jmmh5dby2sccn5kptg6zhrozznub5nmv4sepureeqh.py # Topologically Sorted Source Nodes: [cat, cat_1, cat_2, bilinear_2], Original ATen: [aten.cat, aten.view] # Source node to ATen node mapping: # bilinear_2 => view_6 # cat => cat # cat_1 => cat_1 # cat_2 => cat_2 # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_4, %primals_3], 1), kwargs = {}) # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_9, %primals_3], 1), kwargs = {}) # %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_4, %primals_9], -1), kwargs = {}) # %view_6 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%cat_2, [-1, 8]), kwargs = {}) triton_poi_fused_cat_view_0 = async_compile.triton('triton_poi_fused_cat_view_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_view_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_view_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tmp11 = tl.load(in_ptr2 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.where(tmp4, tmp11, tmp9) tmp13 = tl.load(in_ptr2 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + (x2), tmp10, xmask) tl.store(out_ptr1 + (x2), tmp12, xmask) tl.store(out_ptr2 + (x2), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/qw/cqw33f4xf6xs4gpmtl36somstlfpstoiih2hjqek3ld76xyp5kwr.py # Topologically Sorted Source Nodes: [bilinear, add, tanh], Original ATen: [aten.add, aten.tanh] # Source node to ATen node mapping: # add => add_1 # bilinear => add # tanh => tanh # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_trilinear, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mm), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {}) triton_poi_fused_add_tanh_1 = async_compile.triton('triton_poi_fused_add_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 5 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = libdevice.tanh(tmp4) tl.store(in_out_ptr0 + (x2), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/pf/cpffedz6mlqqwjth7i5kelax2hz72mgai5czgb4uwju6syoykg45.py # Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat_3 => cat_3 # Graph fragment: # %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%cat_2, %primals_3], 1), kwargs = {}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = (xindex // 12) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 8, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 4, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tmp6 & tmp4 tmp8 = tl.load(in_ptr0 + ((4*x1) + x0), tmp7 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp0 >= tmp5 tmp10 = tmp9 & tmp4 tmp11 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.where(tmp6, tmp8, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tmp16 = tl.full([1], 12, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tl.where(tmp4, tmp14, tmp18) tl.store(out_ptr0 + (x2), tmp19, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args args.clear() 
assert_size_stride(primals_1, (5, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (5, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (5, 8), (8, 1)) assert_size_stride(primals_6, (1, 5), (5, 1)) assert_size_stride(primals_7, (5, 4, 4), (16, 4, 1)) assert_size_stride(primals_8, (5, ), (1, )) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (5, 8), (8, 1)) assert_size_stride(primals_11, (1, 5), (5, 1)) assert_size_stride(primals_12, (5, 8, 4), (32, 4, 1)) assert_size_stride(primals_13, (5, ), (1, )) assert_size_stride(primals_14, (5, 12), (12, 1)) assert_size_stride(primals_15, (1, 5), (5, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten._trilinear] buf0 = torch.ops.aten._trilinear.default(primals_4, primals_1, primals_3, [1, 3], [0], [1, 2], [2, 3]) del primals_1 buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32) buf7 = empty_strided_cuda((4, 8), (8, 1), torch.float32) buf10 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat, cat_1, cat_2, bilinear_2], Original ATen: [aten.cat, aten.view] stream0 = get_raw_stream(0) triton_poi_fused_cat_view_0.run(primals_4, primals_3, primals_9, buf2, buf7, buf10, 32, grid=grid(32), stream=stream0) buf3 = empty_strided_cuda((4, 5), (5, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (8, 5), (1, 8), 0), out=buf3) del primals_5 buf4 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [bilinear, add, tanh], Original ATen: [aten.add, aten.tanh] triton_poi_fused_add_tanh_1.run(buf4, primals_2, buf3, 20, grid=grid(20), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [bilinear_1], Original ATen: [aten._trilinear] buf5 = torch.ops.aten._trilinear.default(primals_9, primals_7, primals_3, [1, 3], [0], [1, 2], [2, 3]) del primals_7 buf6 = buf5 del buf5 buf8 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm] extern_kernels.mm(buf7, reinterpret_tensor(primals_10, (8, 5), (1, 8), 0), out=buf8) del primals_10 buf9 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [bilinear_1, add_1, tanh_1], Original ATen: [aten.add, aten.tanh] triton_poi_fused_add_tanh_1.run(buf9, primals_8, buf8, 20, grid=grid(20), stream=stream0) del primals_8 # Topologically Sorted Source Nodes: [bilinear_2], Original ATen: [aten._trilinear] buf11 = torch.ops.aten._trilinear.default(buf10, primals_12, primals_3, [1, 3], [0], [1, 2], [2, 3]) del primals_12 buf12 = buf11 del buf11 buf13 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(primals_4, primals_9, primals_3, buf13, 48, grid=grid(48), stream=stream0) buf14 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.mm] extern_kernels.mm(buf13, reinterpret_tensor(primals_14, (12, 5), (1, 12), 0), out=buf14) del primals_14 buf15 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [bilinear_2, add_2, tanh_2], Original ATen: [aten.add, aten.tanh] triton_poi_fused_add_tanh_1.run(buf15, primals_13, buf14, 20, grid=grid(20), stream=stream0) del buf14 del primals_13 buf16 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: 
[add_3], Original ATen: [aten.add] extern_kernels._mm_plus_mm(buf15, reinterpret_tensor(primals_15, (5, 1), (1, 5), 0), buf4, reinterpret_tensor(primals_6, (5, 1), (1, 5), 0), out=buf16) buf17 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.addmm(buf16, buf9, reinterpret_tensor(primals_11, (5, 1), (1, 5), 0), alpha=1, beta=1, out=buf17) del buf16 return (buf17, primals_4, primals_3, buf2, buf4, primals_9, buf7, buf9, buf10, buf13, buf15, primals_15, primals_11, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((5, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((5, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((5, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 5), (5, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((5, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((5, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((5, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((1, 5), (5, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((5, 8, 4), (32, 4, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((5, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((5, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((1, 5), (5, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class RawNTN(nn.Module): def __init__(self, l_dim, r_dim, k=5, non_linear=torch.tanh): super(RawNTN, self).__init__() self.u_R = nn.Linear(k, 1, bias=False) self.f = non_linear self.W = nn.Bilinear(l_dim, r_dim, k, bias=True) self.V = nn.Linear(l_dim + r_dim, k, bias=False) def forward(self, e, q): """ e: tensor of size (*, l_dim) q: tensor of size (*, r_dim) return: tensor of size (*, 1) """ return self.u_R(self.f(self.W(e, q) + self.V(torch.cat((e, q), 1)))) class TriNTN(nn.Module): def __init__(self, l_dim, r_dim, k=5, non_linear=F.tanh): super(TriNTN, self).__init__() self.match_l = RawNTN(l_dim, r_dim, k, non_linear) self.match_r = RawNTN(l_dim, r_dim, k, non_linear) self.match = RawNTN(l_dim * 2, r_dim, k, non_linear) def forward(self, e1, e2, q): l_scores = self.match_l(e1, q) r_scores = self.match_r(e2, q) scores = self.match(torch.cat((e1, e2), -1), q) + l_scores + r_scores return scores def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'l_dim': 4, 'r_dim': 4}]
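A usage sketch for TriNTN, using the dimensions from get_init_inputs above; it shows that the final score is just the sum of the left, right, and joint NTN scores, one scalar per row.

model = TriNTN(l_dim=4, r_dim=4)
e1, e2, q = torch.rand(4, 4), torch.rand(4, 4), torch.rand(4, 4)
scores = model(e1, e2, q)      # match_l(e1, q) + match_r(e2, q) + match(cat(e1, e2), q)
assert scores.shape == (4, 1)  # one scalar score per (e1, e2, q) triple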
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_view_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tmp11 = tl.load(in_ptr2 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp12 = tl.where(tmp4, tmp11, tmp9) tmp13 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x2, tmp10, xmask) tl.store(out_ptr1 + x2, tmp12, xmask) tl.store(out_ptr2 + x2, tmp14, xmask) @triton.jit def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 5 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = libdevice.tanh(tmp4) tl.store(in_out_ptr0 + x2, tmp5, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 8, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 4, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tmp6 & tmp4 tmp8 = tl.load(in_ptr0 + (4 * x1 + x0), tmp7 & xmask, eviction_policy= 'evict_last', other=0.0) tmp9 = tmp0 >= tmp5 tmp10 = tmp9 & tmp4 tmp11 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.where(tmp6, tmp8, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tl.full([1], 12, tl.int64) tmp18 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tl.where(tmp4, tmp14, tmp18) tl.store(out_ptr0 + x2, tmp19, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (5, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (5,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (5, 8), (8, 1)) 
assert_size_stride(primals_6, (1, 5), (5, 1)) assert_size_stride(primals_7, (5, 4, 4), (16, 4, 1)) assert_size_stride(primals_8, (5,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (5, 8), (8, 1)) assert_size_stride(primals_11, (1, 5), (5, 1)) assert_size_stride(primals_12, (5, 8, 4), (32, 4, 1)) assert_size_stride(primals_13, (5,), (1,)) assert_size_stride(primals_14, (5, 12), (12, 1)) assert_size_stride(primals_15, (1, 5), (5, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten._trilinear.default(primals_4, primals_1, primals_3, [1, 3], [0], [1, 2], [2, 3]) del primals_1 buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32) buf7 = empty_strided_cuda((4, 8), (8, 1), torch.float32) buf10 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_view_0[grid(32)](primals_4, primals_3, primals_9, buf2, buf7, buf10, 32, XBLOCK=32, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 5), (5, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (8, 5), (1, 8 ), 0), out=buf3) del primals_5 buf4 = buf1 del buf1 triton_poi_fused_add_tanh_1[grid(20)](buf4, primals_2, buf3, 20, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 buf5 = torch.ops.aten._trilinear.default(primals_9, primals_7, primals_3, [1, 3], [0], [1, 2], [2, 3]) del primals_7 buf6 = buf5 del buf5 buf8 = buf3 del buf3 extern_kernels.mm(buf7, reinterpret_tensor(primals_10, (8, 5), (1, 8), 0), out=buf8) del primals_10 buf9 = buf6 del buf6 triton_poi_fused_add_tanh_1[grid(20)](buf9, primals_8, buf8, 20, XBLOCK=32, num_warps=1, num_stages=1) del primals_8 buf11 = torch.ops.aten._trilinear.default(buf10, primals_12, primals_3, [1, 3], [0], [1, 2], [2, 3]) del primals_12 buf12 = buf11 del buf11 buf13 = empty_strided_cuda((4, 12), (12, 1), torch.float32) triton_poi_fused_cat_2[grid(48)](primals_4, primals_9, primals_3, buf13, 48, XBLOCK=64, num_warps=1, num_stages=1) buf14 = buf8 del buf8 extern_kernels.mm(buf13, reinterpret_tensor(primals_14, (12, 5), (1, 12), 0), out=buf14) del primals_14 buf15 = buf12 del buf12 triton_poi_fused_add_tanh_1[grid(20)](buf15, primals_13, buf14, 20, XBLOCK=32, num_warps=1, num_stages=1) del buf14 del primals_13 buf16 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels._mm_plus_mm(buf15, reinterpret_tensor(primals_15, (5, 1), (1, 5), 0), buf4, reinterpret_tensor(primals_6, (5, 1), (1, 5), 0), out=buf16) buf17 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(buf16, buf9, reinterpret_tensor(primals_11, (5, 1), (1, 5), 0), alpha=1, beta=1, out=buf17) del buf16 return (buf17, primals_4, primals_3, buf2, buf4, primals_9, buf7, buf9, buf10, buf13, buf15, primals_15, primals_11, primals_6) class RawNTN(nn.Module): def __init__(self, l_dim, r_dim, k=5, non_linear=torch.tanh): super(RawNTN, self).__init__() self.u_R = nn.Linear(k, 1, bias=False) self.f = non_linear self.W = nn.Bilinear(l_dim, r_dim, k, bias=True) self.V = nn.Linear(l_dim + r_dim, k, bias=False) def forward(self, e, q): """ e1: tensor of size (*, l_dim) e2: tensor of size (*, r_dim) return: tensor of size (*, 1) """ return self.u_R(self.f(self.W(e, q) + self.V(torch.cat((e, q), 1)))) class TriNTNNew(nn.Module): def __init__(self, l_dim, r_dim, k=5, non_linear=F.tanh): super(TriNTNNew, self).__init__() self.match_l = RawNTN(l_dim, r_dim, k, non_linear) self.match_r = RawNTN(l_dim, r_dim, k, non_linear) self.match = RawNTN(l_dim * 2, r_dim, k, non_linear) def 
forward(self, input_0, input_1, input_2): primals_6 = self.match_l.u_R.weight primals_1 = self.match_l.W.weight primals_2 = self.match_l.W.bias primals_5 = self.match_l.V.weight primals_11 = self.match_r.u_R.weight primals_7 = self.match_r.W.weight primals_8 = self.match_r.W.bias primals_10 = self.match_r.V.weight primals_15 = self.match.u_R.weight primals_12 = self.match.W.weight primals_13 = self.match.W.bias primals_14 = self.match.V.weight primals_3 = input_0 primals_4 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
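TriNTNNew.forward above illustrates the standard Inductor wrapper pattern: module parameters and the inputs are flattened into the positional primals list that call() expects. A hedged cross-check sketch, assuming a CUDA device and both TriNTN and TriNTNNew in scope (the two classes share an identical submodule layout, so their state dicts are interchangeable):

if torch.cuda.is_available():
    e1, e2, q = (torch.rand(4, 4, device='cuda') for _ in range(3))
    eager = TriNTN(4, 4).cuda()
    compiled = TriNTNNew(4, 4).cuda()
    compiled.load_state_dict(eager.state_dict())  # use identical weights
    assert torch.allclose(eager(e1, e2, q), compiled(e1, e2, q), atol=1e-5)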
QingkaiZeng/GenTaxo
TriNTN
false
8,724
[ "MIT" ]
28
10257a1714d14c6a4c49cbfa0b507408f718cdf0
https://github.com/QingkaiZeng/GenTaxo/tree/10257a1714d14c6a4c49cbfa0b507408f718cdf0
BIM
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/zn/czn6cztle4peyy4pa7mkag53s34sjkn6wpenptu6ttfuvhgzzrup.py # Topologically Sorted Source Nodes: [e], Original ATen: [aten.cat] # Source node to ATen node mapping: # e => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], -1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 8, 4), (32, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [e], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0) del primals_1 del primals_2 # Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten._trilinear] buf1 = torch.ops.aten._trilinear.default(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), primals_3, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_3 buf2 = buf1 del buf1 return (reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 8, 4), (32, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class BIM(nn.Module): def __init__(self, l_dim, r_dim): super(BIM, self).__init__() self.W = nn.Bilinear(l_dim * 2, r_dim, 1, bias=False) def forward(self, e1, e2, q): """ e1: tensor of size (*, l_dim) e2: tensor of size (*, l_dim) q: tensor of size (*, r_dim) return: tensor of size (*, 1) """ e = torch.cat((e1, e2), -1) return self.W(e, q) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'l_dim': 4, 'r_dim': 4}]
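A usage sketch for BIM with the shapes from get_inputs above; concatenating e1 and e2 along the last dimension doubles the left feature size, which is why the bilinear form is built over l_dim * 2.

model = BIM(l_dim=4, r_dim=4)
e1 = torch.rand(4, 4, 4, 4)
e2 = torch.rand(4, 4, 4, 4)
q = torch.rand(4, 4, 4, 4)
score = model(e1, e2, q)            # bilinear form over cat((e1, e2), -1) and q
assert score.shape == (4, 4, 4, 1)  # one scalar per position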
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 8, 4), (32, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = torch.ops.aten._trilinear.default(reinterpret_tensor(buf0, ( 64, 8), (8, 1), 0), primals_3, reinterpret_tensor(primals_4, ( 64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_3 buf2 = buf1 del buf1 return reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor( primals_4, (64, 4), (4, 1), 0) class BIMNew(nn.Module): def __init__(self, l_dim, r_dim): super(BIMNew, self).__init__() self.W = nn.Bilinear(l_dim * 2, r_dim, 1, bias=False) def forward(self, input_0, input_1, input_2): primals_3 = self.W.weight primals_1 = input_0 primals_2 = input_1 primals_4 = input_2 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
QingkaiZeng/GenTaxo
BIM
false
8,725
[ "MIT" ]
28
10257a1714d14c6a4c49cbfa0b507408f718cdf0
https://github.com/QingkaiZeng/GenTaxo/tree/10257a1714d14c6a4c49cbfa0b507408f718cdf0
HighwayNetwork
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/v7/cv7rclhzdb2c7qoumqndxevg24mioljdnzprrwaz2h2f5vnuzevx.py # Topologically Sorted Source Nodes: [g, relu, mul, sub, mul_1, y], Original ATen: [aten.sigmoid, aten.relu, aten.mul, aten.rsub, aten.add] # Source node to ATen node mapping: # g => sigmoid # mul => mul # mul_1 => mul_1 # relu => relu # sub => sub # y => add # Graph fragment: # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {}) # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %relu), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_3), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) triton_poi_fused_add_mul_relu_rsub_sigmoid_0 = async_compile.triton('triton_poi_fused_add_mul_relu_rsub_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_relu_rsub_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_relu_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp8 = tl.load(in_ptr2 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tmp1 * tmp4 tmp6 = 1.0 tmp7 = tmp6 - tmp1 tmp9 = tmp7 * tmp8 tmp10 = tmp5 + tmp9 tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [g, relu, mul, sub, mul_1, y], Original ATen: [aten.sigmoid, aten.relu, aten.mul, aten.rsub, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_relu_rsub_sigmoid_0.run(buf1, buf0, primals_3, buf2, 256, grid=grid(256), stream=stream0) return (buf2, primals_3, buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class HighwayNetwork(nn.Module): def __init__(self, size): super().__init__() self.W1 = nn.Linear(size, size) self.W2 = nn.Linear(size, size) self.W1.bias.data.fill_(0.0) def forward(self, x): x1 = self.W1(x) x2 = self.W2(x) g = torch.sigmoid(x2) y = g * F.relu(x1) + (1.0 - g) * x return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'size': 4}]
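A sketch of the highway gating above: g = sigmoid(W2 x) interpolates elementwise between the transformed path relu(W1 x) and the identity path x, so the layer preserves the input's feature size; shapes follow get_inputs.

hw = HighwayNetwork(size=4)
x = torch.rand(4, 4, 4, 4)
y = hw(x)                  # y = g * relu(W1 x) + (1 - g) * x, elementwise
assert y.shape == x.shape  # highway layers are shape-preserving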
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_relu_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp8 = tl.load(in_ptr2 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tmp1 * tmp4 tmp6 = 1.0 tmp7 = tmp6 - tmp1 tmp9 = tmp7 * tmp8 tmp10 = tmp5 + tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_relu_rsub_sigmoid_0[grid(256)](buf1, buf0, primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf2, primals_3, buf0, buf1 class HighwayNetworkNew(nn.Module): def __init__(self, size): super().__init__() self.W1 = nn.Linear(size, size) self.W2 = nn.Linear(size, size) self.W1.bias.data.fill_(0.0) def forward(self, input_0): primals_1 = self.W1.weight primals_2 = self.W1.bias primals_4 = self.W2.weight primals_5 = self.W2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Rongjiehuang/Multiband-WaveRNN
HighwayNetwork
false
8,726
[ "MIT" ]
18
432e449678220eed841fcb4971415e2e0ac4d9bb
https://github.com/Rongjiehuang/Multiband-WaveRNN/tree/432e449678220eed841fcb4971415e2e0ac4d9bb
Generator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/7i/c7iu56pz7o7inlt4gctiwrhvgn4z2ylu3colemjdqr7qfgtjkl5h.py # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # output_1 => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_7, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_7, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 16) % 5 x5 = xindex x6 = (xindex // 4) tmp3 = tl.load(in_ptr0 + (x5), 
xmask) tmp6 = tl.load(in_ptr0 + (4*x6), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + (4*x6)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + (4*x6)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (3 + (4*x6)), xmask, eviction_policy='evict_last') tmp0 = x2 tmp1 = tl.full([1], 4, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = float("-inf") tmp5 = tl.where(tmp2, tmp4, tmp3) tmp7 = tl.where(tmp2, tmp4, tmp6) tmp9 = tl.where(tmp2, tmp4, tmp8) tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tl.where(tmp2, tmp4, tmp11) tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp15 = tl.where(tmp2, tmp4, tmp14) tmp16 = triton_helpers.maximum(tmp13, tmp15) tmp17 = tmp5 - tmp16 tl.store(out_ptr0 + (x5), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/zb/czbsuot3r4t3p6z6v7ghbuh7edbxmiiojs3cdxb7sfsoldvospsm.py # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # output_1 => exp, log, sub_1, sum_1 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, 
eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 5, 4, 4), (80, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((80, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (80, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(buf0, buf1, 320, grid=grid(320), stream=stream0) buf2 = reinterpret_tensor(buf0, (4, 5, 4, 4), (80, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_1.run(buf1, buf2, 320, grid=grid(320), stream=stream0) del buf1 return (buf2, reinterpret_tensor(primals_3, (80, 4), (4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 5, 4, 4), (80, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.distributed import torch import torch.nn as nn def gumbel_softmax(logits, tau=1.0, hard=False, log_mode=True, dim=-1): while True: gumbels = -torch.empty_like(logits).exponential_().log() gumbels = (logits + gumbels) / tau if log_mode: y_soft = gumbels.log_softmax(dim) else: y_soft = gumbels.softmax(dim) if torch.sum(torch.isnan(y_soft)).item() < 0.01: break if hard: index = y_soft.max(dim, keepdim=True)[1] y_hard = torch.zeros_like(logits).scatter_(dim, index, 1.0) ret = y_hard - y_soft.detach() + y_soft else: ret = y_soft return ret class Generator(nn.Module): def __init__(self, vocab_size, dec_hidden_size, pad_idx): super(Generator, self).__init__() self.linear = nn.Linear(dec_hidden_size, vocab_size) self.softmax = nn.LogSoftmax(dim=-1) self.pad_idx = pad_idx def forward(self, x, use_gumbel_softmax=False): output = self.linear(x) output[:, self.pad_idx] = -float('inf') if use_gumbel_softmax: output = gumbel_softmax(output, log_mode=True, dim=-1) else: output = self.softmax(output) return output def get_inputs(): return [torch.rand([4, 5, 4, 4])] def get_init_inputs(): return [[], {'vocab_size': 4, 'dec_hidden_size': 4, 'pad_idx': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.distributed import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 % 5 x5 = xindex x6 = xindex // 4 tmp3 = tl.load(in_ptr0 + x5, xmask) tmp6 = tl.load(in_ptr0 + 4 * x6, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + 4 * x6), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x6), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (3 + 4 * x6), xmask, eviction_policy='evict_last' ) tmp0 = x2 tmp1 = tl.full([1], 4, tl.int32) tmp2 = tmp0 == tmp1 tmp4 = float('-inf') tmp5 = tl.where(tmp2, tmp4, tmp3) tmp7 = tl.where(tmp2, tmp4, tmp6) tmp9 = tl.where(tmp2, tmp4, tmp8) tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tl.where(tmp2, tmp4, tmp11) tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp15 = tl.where(tmp2, tmp4, tmp14) tmp16 = triton_helpers.maximum(tmp13, tmp15) tmp17 = tmp5 - tmp16 tl.store(out_ptr0 + x5, tmp17, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 5, 4, 4), (80, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((80, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (80, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(320)](buf0, buf1, 320, XBLOCK= 128, num_warps=4, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 5, 4, 4), (80, 16, 4, 1), 0) del buf0 triton_poi_fused__log_softmax_1[grid(320)](buf1, buf2, 320, XBLOCK= 256, num_warps=4, num_stages=1) del buf1 return buf2, reinterpret_tensor(primals_3, (80, 4), (4, 1), 0), buf2 def gumbel_softmax(logits, tau=1.0, hard=False, log_mode=True, dim=-1): while True: gumbels = 
-torch.empty_like(logits).exponential_().log() gumbels = (logits + gumbels) / tau if log_mode: y_soft = gumbels.log_softmax(dim) else: y_soft = gumbels.softmax(dim) if torch.sum(torch.isnan(y_soft)).item() < 0.01: break if hard: index = y_soft.max(dim, keepdim=True)[1] y_hard = torch.zeros_like(logits).scatter_(dim, index, 1.0) ret = y_hard - y_soft.detach() + y_soft else: ret = y_soft return ret class GeneratorNew(nn.Module): def __init__(self, vocab_size, dec_hidden_size, pad_idx): super(GeneratorNew, self).__init__() self.linear = nn.Linear(dec_hidden_size, vocab_size) self.softmax = nn.LogSoftmax(dim=-1) self.pad_idx = pad_idx def forward(self, input_0): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
RowitZou/RankAE
Generator
false
8,727
[ "MIT" ]
23
d47ab58aa4fda203c551e36cbe04edd564b76d89
https://github.com/RowitZou/RankAE/tree/d47ab58aa4fda203c551e36cbe04edd564b76d89
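The Generator record above lowers a linear projection, a pad-index mask, and a log-softmax into one extern addmm plus two fused Triton kernels: the first subtracts the row max with the mask folded in as the x2 == 4 predicate (pad_idx = 4 selects the fifth slice of dim 1), the second applies the exp/sum/log correction. Two hedged asides, not claims from the source: because the mask runs along dim 1 while the log-softmax runs over the last dim, the masked slice covers entire softmax rows and comes out non-finite (NaN in eager PyTorch); and the gumbel_softmax guard torch.sum(torch.isnan(y_soft)).item() < 0.01 compares an integer count, so it simply retries until no NaNs appear. A minimal eager sketch of the compiled forward, with illustrative inputs (the seed and tensors are not from the source):

import torch
import torch.nn as nn

torch.manual_seed(0)
linear = nn.Linear(4, 4)             # vocab_size = dec_hidden_size = 4 in this record
x = torch.rand(4, 5, 4, 4)
out = linear(x)                      # lowered to extern_kernels.addmm over (80, 4)
out[:, 4] = float('-inf')            # pad_idx = 4; fused into kernel 0 as the x2 == 4 test
log_probs = out.log_softmax(dim=-1)  # kernels 0 and 1: max-subtract, then exp/sum/log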
DiceLoss_pt
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/qr/cqro6zvv4qv4seqkgrsxl6knrjgikjev3vgel76rrnodlrxot22k.py # Topologically Sorted Source Nodes: [mul, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, score, sum_4, truediv_1, out], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # intersection => sum_1 # mul => mul # mul_1 => mul_1 # out => sub # score => div # sum_2 => sum_2 # sum_3 => sum_3 # sum_4 => sum_4 # truediv_1 => div_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1.0), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view_1,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1.0), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%div,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_4, 4), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_1), kwargs = {}) triton_per_fused_add_div_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp2 = tl.load(in_ptr1 + (r0), None) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = tl.broadcast_to(tmp1, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = tl.broadcast_to(tmp2, [RBLOCK]) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0)) tmp13 = 2.0 tmp14 = tmp6 * tmp13 tmp15 = 1.0 tmp16 = tmp14 + tmp15 tmp17 = tmp9 + tmp12 tmp18 = tmp17 + tmp15 tmp19 = tmp16 / tmp18 tmp20 = 0.25 tmp21 = tmp19 * tmp20 tmp22 = tmp15 - tmp21 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp22, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mul, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, score, sum_4, truediv_1, out], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub] stream0 = get_raw_stream(0) triton_per_fused_add_div_mul_rsub_sum_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class DiceLoss_pt(nn.Module): def __init__(self, weight=None, size_average=True): super(DiceLoss_pt, self).__init__() def forward(self, y_pred, y_true): smooth = 1.0 y_pred_sig = F.sigmoid(y_pred) num = y_true.size(0) x = y_pred_sig.view(num, -1).float() y = y_true.view(num, -1).float() intersection = torch.sum(x * y) score = (2.0 * intersection + smooth) / (torch.sum(x) + torch.sum(y) + smooth) out = 1 - torch.sum(score) / num return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr1 + r0, None) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = tl.broadcast_to(tmp1, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = tl.broadcast_to(tmp2, [RBLOCK]) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0)) tmp13 = 2.0 tmp14 = tmp6 * tmp13 tmp15 = 1.0 tmp16 = tmp14 + tmp15 tmp17 = tmp9 + tmp12 tmp18 = tmp17 + tmp15 tmp19 = tmp16 / tmp18 tmp20 = 0.25 tmp21 = tmp19 * tmp20 tmp22 = tmp15 - tmp21 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp22, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf3 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf3, class DiceLoss_ptNew(nn.Module): def __init__(self, weight=None, size_average=True): super(DiceLoss_ptNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
SCCH-KVS/training-engine
DiceLoss_pt
false
8,728
[ "Apache-2.0" ]
17
dc52b7a06884f967c7c1aabfba39802dd2983162
https://github.com/SCCH-KVS/training-engine/tree/dc52b7a06884f967c7c1aabfba39802dd2983162
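The DiceLoss_pt record above is a clean example of whole-loss fusion: the sigmoid, the three global reductions (intersection and the two sums), and the final scalar arithmetic all land in a single persistent-reduction kernel over the 256 elements. The kernel's constants map directly onto the eager formula: 1.0 is smooth, 2.0 scales the intersection, and 0.25 is the folded 1/num for num = 4; tl.sigmoid also quietly stands in for the deprecated F.sigmoid. A short eager sketch of the same computation, assuming the (4, 4, 4, 4) shapes from this record (inputs are illustrative):

import torch

torch.manual_seed(0)
y_pred = torch.rand(4, 4, 4, 4)
y_true = torch.rand(4, 4, 4, 4)
x = torch.sigmoid(y_pred).reshape(4, -1)
y = y_true.reshape(4, -1)
score = (2.0 * (x * y).sum() + 1.0) / (x.sum() + y.sum() + 1.0)  # smooth = 1.0
loss = 1 - score / 4  # the kernel folds 1/num into its 0.25 multiply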
My_Tanh
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/lf/clfyapqxbe4jzlm476wzwdvtwbrdm2t3dm7yfwx2xntywiujedaf.py # Topologically Sorted Source Nodes: [tanh, add, mul], Original ATen: [aten.tanh, aten.add, aten.mul] # Source node to ATen node mapping: # add => add # mul => mul # tanh => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%arg0_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {}) triton_poi_fused_add_mul_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = 1.0 tmp3 = tmp1 + tmp2 tmp4 = 0.5 tmp5 = tmp3 * tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tanh, add, mul], Original ATen: [aten.tanh, aten.add, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch.nn as nn class My_Tanh(nn.Module): def __init__(self): super(My_Tanh, self).__init__() self.tanh = nn.Tanh() def forward(self, x): return 0.5 * (self.tanh(x) + 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = 1.0 tmp3 = tmp1 + tmp2 tmp4 = 0.5 tmp5 = tmp3 * tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class My_TanhNew(nn.Module): def __init__(self): super(My_TanhNew, self).__init__() self.tanh = nn.Tanh() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
SUTDBrainLab/MGP-VAE
My_Tanh
false
8,731
[ "MIT" ]
30
0b7c252f9f7bdcdf3c4177ac40585633a0e98a0f
https://github.com/SUTDBrainLab/MGP-VAE/tree/0b7c252f9f7bdcdf3c4177ac40585633a0e98a0f
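The My_Tanh record above rescales tanh output from [-1, 1] into [0, 1]. Algebraically, 0.5 * (tanh(x) + 1) equals sigmoid(2x), though the generated kernel keeps the literal sequence (libdevice.tanh, add 1.0, multiply by 0.5) rather than rewriting it. A quick identity check, offered as an aside rather than anything from the source:

import torch

x = torch.randn(4, 4, 4, 4)
a = 0.5 * (torch.tanh(x) + 1)  # what triton_poi_fused_add_mul_tanh_0 computes per element
b = torch.sigmoid(2 * x)
assert torch.allclose(a, b, atol=1e-6)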
PreNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/cn/ccnvkf7kfnskbbfy2kwx55oghjftngamwdttghryrfs4g3fay72l.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) 
x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/cq/ccqbfbzeetpsc2d2hbydlb6zh44oakzviqnnh5rue727xy45hkgn.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_4 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 256), (256, 1)) assert_size_stride(primals_5, (128, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) # Topologically Sorted Source 
Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf0 # reuse buf5 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 16384, grid=grid(16384), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 128), (1, 256), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf2 # reuse buf4 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf4, 8192, grid=grid(8192), stream=stream0) del primals_5 return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), buf4, primals_4, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class PreNet(nn.Module): def __init__(self, in_dims, fc1_dims=256, fc2_dims=128, dropout=0.5): super().__init__() self.fc1 = nn.Linear(in_dims, fc1_dims) self.fc2 = nn.Linear(fc1_dims, fc2_dims) self.p = dropout def forward(self, x): x = self.fc1(x) x = F.relu(x) x = F.dropout(x, self.p, training=self.training) x = self.fc2(x) x = F.relu(x) x = F.dropout(x, self.p, training=self.training) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dims': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 256), (256, 1)) assert_size_stride(primals_5, (128,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1, primals_2, buf5, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 128), (1, 256), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(8192)](buf3, primals_5, buf4, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 256), (256, 1), 0 ), buf4, primals_4, buf5 class PreNetNew(nn.Module): def __init__(self, in_dims, fc1_dims=256, fc2_dims=128, dropout=0.5): super().__init__() self.fc1 = nn.Linear(in_dims, fc1_dims) self.fc2 = nn.Linear(fc1_dims, fc2_dims) self.p = dropout def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = 
input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Rongjiehuang/Multiband-WaveRNN
PreNet
false
8,733
[ "MIT" ]
18
432e449678220eed841fcb4971415e2e0ac4d9bb
https://github.com/Rongjiehuang/Multiband-WaveRNN/tree/432e449678220eed841fcb4971415e2e0ac4d9bb
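The PreNet record above compiles to just two extern mm calls and two fused bias-add + ReLU kernels; the two F.dropout calls leave no trace in the graph, which is consistent with the capture having run with training=False, where dropout is the identity. The extra boolean buffers (buf4, buf5) are the ReLU masks (relu <= 0) saved for threshold_backward in the backward pass. A minimal eager sketch of the captured forward, with illustrative weights rather than the module's initialized parameters:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
x = torch.rand(4, 4, 4, 4)
w1, b1 = torch.rand(256, 4), torch.rand(256)
w2, b2 = torch.rand(128, 256), torch.rand(128)
h = F.relu(F.linear(x, w1, b1))    # extern mm + fused bias-add/ReLU kernel 0
out = F.relu(F.linear(h, w2, b2))  # second mm + fused kernel 1; dropout is a no-op here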
GNNExplainerProbe
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/w4/cw4n4pbxxbljnrd4yatv6v33qk5dxjyk3ms7gaxvcbxolxuor5ri.py # Topologically Sorted Source Nodes: [s, neg, log, mul, sub, log_1, mul_1, mask_ent, mean, sum_1, mul_2, penalty], Original ATen: [aten.sigmoid, aten.neg, aten.log, aten.mul, aten.rsub, aten.sub, aten.mean, aten.sum, aten.add] # Source node to ATen node mapping: # log => log # log_1 => log_1 # mask_ent => sub_2 # mean => mean # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # neg => neg # penalty => add # s => sigmoid # sub => sub # sum_1 => sum_1 # Graph fragment: # %sigmoid : [num_users=5] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sigmoid,), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sigmoid,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %log), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sub,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %log_1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sigmoid,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 0.5), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mul_2), kwargs = {}) triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_sub_sum_0 = async_compile.triton('triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math 
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_sub_sum_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.sigmoid(tmp0) tmp2 = -tmp1 tmp3 = tl_math.log(tmp1) tmp4 = tmp2 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tl_math.log(tmp6) tmp8 = tmp6 * tmp7 tmp9 = tmp4 - tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp13 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp15 = tl.sum(tmp13, 1)[:, None] tmp16 = 4.0 tmp17 = tmp12 / tmp16 tmp18 = 0.5 tmp19 = tmp15 * tmp18 tmp20 = tmp17 + tmp19 tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp1, None) tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp20, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [s, neg, log, mul, sub, log_1, mul_1, mask_ent, mean, sum_1, mul_2, penalty], Original ATen: [aten.sigmoid, aten.neg, aten.log, aten.mul, aten.rsub, aten.sub, aten.mean, aten.sum, aten.add] stream0 = get_raw_stream(0) triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_sub_sum_0.run(buf3, primals_1, buf0, 1, 4, grid=grid(1), stream=stream0) del primals_1 return (buf0, buf3, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch class AbstractTorchModule(torch.nn.Module): def __init__(self): torch.nn.Module.__init__(self) def save(self, path): None torch.save(self.state_dict(), path) def load(self, path): None self.load_state_dict(torch.load(path, map_location=self.device)) def set_device(self, device): self.device = device self class GNNExplainerProbe(AbstractTorchModule): def __init__(self, num_edges, num_layers, init_strategy='normal', const_val=1.0): super(GNNExplainerProbe, self).__init__() mask = torch.empty((num_layers, num_edges)) if init_strategy == 'normal': std = torch.nn.init.calculate_gain('relu') * math.sqrt(2.0 / (2 * math.sqrt(num_edges))) with torch.no_grad(): mask.normal_(1.0, std) elif init_strategy == 'const': torch.nn.init.constant_(mask, const_val) self.mask = torch.nn.Parameter(mask) def forward(self): s = torch.sigmoid(self.mask) mask_ent = -s * torch.log(s) - (1 - s) * torch.log(1 - s) penalty = mask_ent.mean() + 0.5 * s.sum() return s, penalty def get_inputs(): return [] def get_init_inputs(): return [[], {'num_edges': 4, 'num_layers': 1}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_sub_sum_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.sigmoid(tmp0) tmp2 = -tmp1 tmp3 = tl_math.log(tmp1) tmp4 = tmp2 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp1 tmp7 = tl_math.log(tmp6) tmp8 = tmp6 * tmp7 tmp9 = tmp4 - tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp13 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp15 = tl.sum(tmp13, 1)[:, None] tmp16 = 4.0 tmp17 = tmp12 / tmp16 tmp18 = 0.5 tmp19 = tmp15 * tmp18 tmp20 = tmp17 + tmp19 tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp1, None) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None) def call(args): primals_1, = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) buf3 = buf1 del buf1 get_raw_stream(0) triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_sub_sum_0[grid(1)]( buf3, primals_1, buf0, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 return buf0, buf3, buf0 class AbstractTorchModule(torch.nn.Module): def __init__(self): torch.nn.Module.__init__(self) def save(self, path): None torch.save(self.state_dict(), path) def load(self, path): None self.load_state_dict(torch.load(path, map_location=self.device)) def set_device(self, device): self.device = device self class GNNExplainerProbeNew(AbstractTorchModule): def __init__(self, num_edges, num_layers, init_strategy='normal', const_val=1.0): super(GNNExplainerProbeNew, self).__init__() mask = torch.empty((num_layers, num_edges)) if init_strategy == 'normal': std = torch.nn.init.calculate_gain('relu') * math.sqrt(2.0 / (2 * math.sqrt(num_edges))) with torch.no_grad(): mask.normal_(1.0, std) elif init_strategy == 'const': torch.nn.init.constant_(mask, const_val) self.mask = torch.nn.Parameter(mask) def forward(self): primals_1 = self.mask output = call([primals_1]) return output[0], output[1]
S-Eggers/GraphMask
GNNExplainerProbe
false
8,735
[ "MIT" ]
28
9e431a541279801ec46a5b38ed57b2033f795240
https://github.com/S-Eggers/GraphMask/tree/9e431a541279801ec46a5b38ed57b2033f795240
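The GNNExplainerProbe record above takes no runtime inputs; the compiled forward reads only the mask parameter. A single persistent-reduction kernel computes s = sigmoid(mask), writes s out as the first output, and reduces both the binary-entropy term and the sum of s in one pass: the 4.0 divisor is the element count behind .mean(), and 0.5 is the sparsity weight on s.sum(). An eager sketch of the fused penalty, assuming the (1, 4) mask shape from this record:

import torch

mask = torch.randn(1, 4)
s = torch.sigmoid(mask)  # s in (0, 1), so both logs below stay finite
ent = (-s * torch.log(s) - (1 - s) * torch.log(1 - s)).mean()
penalty = ent + 0.5 * s.sum()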
Normalize01
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/oj/coje67cr5cqcknalck7l4jbdj3hq7t6wu7eizey5gsludxsn3ixp.py # Topologically Sorted Source Nodes: [min_val, max_val], Original ATen: [aten.min, aten.max] # Source node to ATen node mapping: # max_val => max_1 # min_val => min_1 # Graph fragment: # %min_1 : [num_users=2] = call_function[target=torch.ops.aten.min.default](args = (%select,), kwargs = {}) # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.default](args = (%select_1,), kwargs = {}) triton_per_fused_max_min_0 = async_compile.triton('triton_per_fused_max_min_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_min_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_max_min_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] 
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.min2(tmp1, 1)[:, None] tmp5 = triton_helpers.max2(tmp1, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp3, None) tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/yq/cyqaq5dj7uuujwecu4kf26sxqg4ouqstaundkeorqadqbxojexrv.py # Topologically Sorted Source Nodes: [min_val_1, max_val_1], Original ATen: [aten.min, aten.max] # Source node to ATen node mapping: # max_val_1 => max_2 # min_val_1 => min_2 # Graph fragment: # %min_2 : [num_users=2] = call_function[target=torch.ops.aten.min.default](args = (%select_6,), kwargs = {}) # %max_2 : [num_users=1] = call_function[target=torch.ops.aten.max.default](args = (%select_7,), kwargs = {}) triton_per_fused_max_min_1 = async_compile.triton('triton_per_fused_max_min_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_min_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_max_min_1(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (64 + r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.min2(tmp1, 1)[:, None] tmp5 = triton_helpers.max2(tmp1, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp3, None) tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/gv/cgvd5qkauuvf7j4d2cf4upp4zdilt6t2kbw2tio5mcfbjzevfepw.py # Topologically Sorted Source Nodes: [min_val_2, max_val_2], Original ATen: [aten.min, aten.max] # Source node to ATen node mapping: # max_val_2 => max_3 # 
min_val_2 => min_3 # Graph fragment: # %min_3 : [num_users=2] = call_function[target=torch.ops.aten.min.default](args = (%select_13,), kwargs = {}) # %max_3 : [num_users=1] = call_function[target=torch.ops.aten.max.default](args = (%select_14,), kwargs = {}) triton_per_fused_max_min_2 = async_compile.triton('triton_per_fused_max_min_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_min_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_max_min_2(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (128 + r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.min2(tmp1, 1)[:, None] tmp5 = triton_helpers.max2(tmp1, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp3, None) tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/sz/csz7pjuraxvkquk5qyffbina4ejtvx5fituiicoc7seg6eudvzn6.py # Topologically Sorted Source Nodes: [min_val_3, max_val_3], Original ATen: [aten.min, aten.max] # Source node to ATen node mapping: # max_val_3 => max_4 # min_val_3 => min_4 # Graph fragment: # %min_4 : [num_users=2] = call_function[target=torch.ops.aten.min.default](args = (%select_20,), kwargs = {}) # %max_4 : [num_users=1] = call_function[target=torch.ops.aten.max.default](args = (%select_21,), kwargs = {}) triton_per_fused_max_min_3 = async_compile.triton('triton_per_fused_max_min_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 
64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_min_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_max_min_3(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (192 + r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.min2(tmp1, 1)[:, None] tmp5 = triton_helpers.max2(tmp1, 1)[:, None] tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp3, None) tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/nc/cncngsnacjbtykxtvhfb4jhznalqts7a4i4yjvps3nrjxtr75vbd.py # Topologically Sorted Source Nodes: [result_noisy_1, sub, sub_1, truediv, sub_2, sub_3, truediv_1, sub_4, sub_5, truediv_2, sub_6, sub_7, truediv_3], Original ATen: [aten.zeros_like, aten.sub, aten.div] # Source node to ATen node mapping: # result_noisy_1 => full_default # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # sub_3 => sub_3 # sub_4 => sub_4 # sub_5 => sub_5 # sub_6 => sub_6 # sub_7 => sub_7 # truediv => div # truediv_1 => div_1 # truediv_2 => div_2 # truediv_3 => div_3 # Graph fragment: # %full_default : [num_users=3] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_2, %min_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%max_1, %min_1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sub_1), kwargs = {}) # %select_scatter_default : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%full_default, %div, 0, 0), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_8, %min_2), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%max_2, %min_2), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, %sub_3), kwargs = {}) # %select_scatter_default_1 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default, %div_1, 
0, 1), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_15, %min_3), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%max_3, %min_3), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_4, %sub_5), kwargs = {}) # %select_scatter_default_2 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %div_2, 0, 2), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_22, %min_4), kwargs = {}) # %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%max_4, %min_4), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_6, %sub_7), kwargs = {}) # %select_scatter_default_3 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_2, %div_3, 0, 3), kwargs = {}) triton_poi_fused_div_sub_zeros_like_4 = async_compile.triton('triton_poi_fused_div_sub_zeros_like_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_zeros_like_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sub_zeros_like_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 64) x0 = xindex % 64 x2 = xindex tmp3 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (0)) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp7 = tl.load(in_ptr2 + (0)) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (0)) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp17 = tl.load(in_ptr4 + (0)) tmp18 = tl.broadcast_to(tmp17, [XBLOCK]) tmp26 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr5 + (0)) 
tmp28 = tl.broadcast_to(tmp27, [XBLOCK]) tmp30 = tl.load(in_ptr6 + (0)) tmp31 = tl.broadcast_to(tmp30, [XBLOCK]) tmp36 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp37 = tl.load(in_ptr7 + (0)) tmp38 = tl.broadcast_to(tmp37, [XBLOCK]) tmp40 = tl.load(in_ptr8 + (0)) tmp41 = tl.broadcast_to(tmp40, [XBLOCK]) tmp0 = x1 tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp0 == tmp1 tmp6 = tmp3 - tmp5 tmp9 = tmp8 - tmp5 tmp10 = tmp6 / tmp9 tmp11 = tl.full([1], 0, tl.int32) tmp12 = tmp0 == tmp11 tmp16 = tmp13 - tmp15 tmp19 = tmp18 - tmp15 tmp20 = tmp16 / tmp19 tmp21 = 0.0 tmp22 = tl.where(tmp12, tmp20, tmp21) tmp23 = tl.where(tmp2, tmp10, tmp22) tmp24 = tl.full([1], 3, tl.int32) tmp25 = tmp0 == tmp24 tmp29 = tmp26 - tmp28 tmp32 = tmp31 - tmp28 tmp33 = tmp29 / tmp32 tmp34 = tl.full([1], 2, tl.int32) tmp35 = tmp0 == tmp34 tmp39 = tmp36 - tmp38 tmp42 = tmp41 - tmp38 tmp43 = tmp39 / tmp42 tmp44 = tl.where(tmp35, tmp43, tmp23) tmp45 = tl.where(tmp25, tmp33, tmp44) tl.store(in_out_ptr0 + (x2), tmp45, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [min_val, max_val], Original ATen: [aten.min, aten.max] stream0 = get_raw_stream(0) triton_per_fused_max_min_0.run(arg0_1, buf0, buf1, 1, 64, grid=grid(1), stream=stream0) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [min_val_1, max_val_1], Original ATen: [aten.min, aten.max] triton_per_fused_max_min_1.run(arg0_1, buf2, buf3, 1, 64, grid=grid(1), stream=stream0) buf5 = empty_strided_cuda((), (), torch.float32) buf6 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [min_val_2, max_val_2], Original ATen: [aten.min, aten.max] triton_per_fused_max_min_2.run(arg0_1, buf5, buf6, 1, 64, grid=grid(1), stream=stream0) buf7 = empty_strided_cuda((), (), torch.float32) buf8 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [min_val_3, max_val_3], Original ATen: [aten.min, aten.max] triton_per_fused_max_min_3.run(arg0_1, buf7, buf8, 1, 64, grid=grid(1), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf9 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [result_noisy_1, sub, sub_1, truediv, sub_2, sub_3, truediv_1, sub_4, sub_5, truediv_2, sub_6, sub_7, truediv_3], Original ATen: [aten.zeros_like, aten.sub, aten.div] triton_poi_fused_div_sub_zeros_like_4.run(buf9, arg0_1, buf2, buf3, buf0, buf1, buf7, buf8, buf5, buf6, 256, grid=grid(256), stream=stream0) del arg0_1 del buf0 del buf1 del buf2 del buf3 del buf5 del buf6 del buf7 del buf8 return (buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Normalize01(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, result_noisy):
        Nbatch = result_noisy.size(0)
        result_noisy_01 = torch.zeros_like(result_noisy)
        for i in range(Nbatch):
            min_val = result_noisy[i, :, :, :].min()
            max_val = result_noisy[i, :, :, :].max()
            result_noisy_01[i, :, :, :] = (result_noisy[i, :, :, :] -
                min_val) / (max_val - min_val)
        return result_noisy_01


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
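A minimal usage sketch (not part of the original record; it assumes the Normalize01 class above is in scope): the loop rescales each batch sample to [0, 1] by its own min and max, so it can be checked against a vectorized amin/amax over the non-batch dims.

import torch

x = torch.rand(4, 4, 4, 4)
mn = x.amin(dim=(1, 2, 3), keepdim=True)  # per-sample minimum
mx = x.amax(dim=(1, 2, 3), keepdim=True)  # per-sample maximum
x01 = (x - mn) / (mx - mn)                # each sample now spans [0, 1]

# Should agree with the looped implementation above.
assert torch.allclose(x01, Normalize01()(x))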
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_max_min_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.min2(tmp1, 1)[:, None] tmp5 = triton_helpers.max2(tmp1, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None) tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) @triton.jit def triton_per_fused_max_min_1(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (64 + r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.min2(tmp1, 1)[:, None] tmp5 = triton_helpers.max2(tmp1, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None) tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) @triton.jit def triton_per_fused_max_min_2(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (128 + r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.min2(tmp1, 1)[:, None] tmp5 = triton_helpers.max2(tmp1, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None) tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) @triton.jit def triton_per_fused_max_min_3(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (192 + r0), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.min2(tmp1, 1)[:, None] tmp5 = triton_helpers.max2(tmp1, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None) tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) @triton.jit def triton_poi_fused_div_sub_zeros_like_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 64 x0 = xindex % 64 x2 = xindex tmp3 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + 0) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp7 = tl.load(in_ptr2 + 0) tmp8 = tl.broadcast_to(tmp7, 
[XBLOCK]) tmp13 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + 0) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp17 = tl.load(in_ptr4 + 0) tmp18 = tl.broadcast_to(tmp17, [XBLOCK]) tmp26 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr5 + 0) tmp28 = tl.broadcast_to(tmp27, [XBLOCK]) tmp30 = tl.load(in_ptr6 + 0) tmp31 = tl.broadcast_to(tmp30, [XBLOCK]) tmp36 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp37 = tl.load(in_ptr7 + 0) tmp38 = tl.broadcast_to(tmp37, [XBLOCK]) tmp40 = tl.load(in_ptr8 + 0) tmp41 = tl.broadcast_to(tmp40, [XBLOCK]) tmp0 = x1 tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp0 == tmp1 tmp6 = tmp3 - tmp5 tmp9 = tmp8 - tmp5 tmp10 = tmp6 / tmp9 tmp11 = tl.full([1], 0, tl.int32) tmp12 = tmp0 == tmp11 tmp16 = tmp13 - tmp15 tmp19 = tmp18 - tmp15 tmp20 = tmp16 / tmp19 tmp21 = 0.0 tmp22 = tl.where(tmp12, tmp20, tmp21) tmp23 = tl.where(tmp2, tmp10, tmp22) tmp24 = tl.full([1], 3, tl.int32) tmp25 = tmp0 == tmp24 tmp29 = tmp26 - tmp28 tmp32 = tmp31 - tmp28 tmp33 = tmp29 / tmp32 tmp34 = tl.full([1], 2, tl.int32) tmp35 = tmp0 == tmp34 tmp39 = tmp36 - tmp38 tmp42 = tmp41 - tmp38 tmp43 = tmp39 / tmp42 tmp44 = tl.where(tmp35, tmp43, tmp23) tmp45 = tl.where(tmp25, tmp33, tmp44) tl.store(in_out_ptr0 + x2, tmp45, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_max_min_0[grid(1)](arg0_1, buf0, buf1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = empty_strided_cuda((), (), torch.float32) triton_per_fused_max_min_1[grid(1)](arg0_1, buf2, buf3, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf5 = empty_strided_cuda((), (), torch.float32) buf6 = empty_strided_cuda((), (), torch.float32) triton_per_fused_max_min_2[grid(1)](arg0_1, buf5, buf6, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf7 = empty_strided_cuda((), (), torch.float32) buf8 = empty_strided_cuda((), (), torch.float32) triton_per_fused_max_min_3[grid(1)](arg0_1, buf7, buf8, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf9 = buf4 del buf4 triton_poi_fused_div_sub_zeros_like_4[grid(256)](buf9, arg0_1, buf2, buf3, buf0, buf1, buf7, buf8, buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del buf0 del buf1 del buf2 del buf3 del buf5 del buf6 del buf7 del buf8 return buf9, class Normalize01New(nn.Module): def __init__(self): super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ScarWar/DeepSTORM3D
Normalize01
false
8,736
[ "MIT" ]
25
8ba5bc61120abedba9c1b24a994e616e280bdda2
https://github.com/ScarWar/DeepSTORM3D/tree/8ba5bc61120abedba9c1b24a994e616e280bdda2
rSoftMax
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/vu/cvueuunwugafvwptu24xkxdfuddgwbwo3rozlvzvsrlth4crynob.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_1 => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%permute, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 32 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 - tmp3 tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 - tmp3 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tmp5 / tmp10 tl.store(out_ptr0 + (x3), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 1, 32), (64, 32, 32, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 64, 1, 1), (64, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class rSoftMax(nn.Module):
    """
    Radix-major softmax.

    The input is a cardinal-major shaped tensor; it is transposed to
    radix-major before the softmax is applied over the radix dimension.
    """

    def __init__(self, groups=1, radix=2):
        super(rSoftMax, self).__init__()
        self.groups = groups
        self.radix = radix

    def forward(self, x):
        B = x.size(0)
        x = x.view(B, self.groups, self.radix, -1).transpose(1, 2)
        x = F.softmax(x, dim=1)
        x = x.view(B, -1, 1, 1)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
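A shape-flow sketch (assumes the rSoftMax class above is in scope; the explicit stacking is illustrative, not from the source): with groups=1 and radix=2 on a (4, 4, 4, 4) input, the view/transpose pairs position k of each flattened sample with position k + 32 and softmaxes across that radix pair, which is exactly the x0 / 32 + x0 indexing with stride 64 visible in the fused kernel below.

import torch
import torch.nn.functional as F

m = rSoftMax(groups=1, radix=2)
x = torch.rand(4, 4, 4, 4)
y = m(x)                                                  # -> (4, 64, 1, 1)

# Manual equivalent: softmax over the (flat[k], flat[k + 32]) pairs.
flat = x.reshape(4, -1)
pairs = torch.stack((flat[:, :32], flat[:, 32:]), dim=1)  # (4, 2, 32)
ref = F.softmax(pairs, dim=1).reshape(4, 64, 1, 1)
assert torch.allclose(y, ref)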
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 32
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp4 = tmp0 - tmp3
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tmp1 - tmp3
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp2 - tmp3
    tmp9 = tl_math.exp(tmp8)
    tmp10 = tmp7 + tmp9
    tmp11 = tmp5 / tmp10
    tl.store(out_ptr0 + x3, tmp11, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 2, 1, 32), (64, 32, 32, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return reinterpret_tensor(buf0, (4, 64, 1, 1), (64, 1, 1, 1), 0),


class rSoftMaxNew(nn.Module):
    """
    Radix-major softmax.

    The input is a cardinal-major shaped tensor; it is transposed to
    radix-major before the softmax is applied over the radix dimension.
    """

    def __init__(self, groups=1, radix=2):
        super(rSoftMaxNew, self).__init__()
        self.groups = groups
        self.radix = radix

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
STomoya/ResNeSt
rSoftMax
false
8,737
[ "Apache-2.0" ]
13
3b2b4f4a73d138bb1e4ff2b8695be4cf950543da
https://github.com/STomoya/ResNeSt/tree/3b2b4f4a73d138bb1e4ff2b8695be4cf950543da
SVIGlobalMeanPool2D
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/il/cilscfadmnu2fo3ehwc4xiajeek6onsn575xsamwre6n4ml66wx3.py # Topologically Sorted Source Nodes: [mean, x], Original ATen: [aten.mean] # Source node to ATen node mapping: # mean => mean # x => mean_1 # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [4]), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean, [3]), kwargs = {}) triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = tmp15 / tmp7 tmp17 = tmp8 + tmp16 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp24 = tmp22 + tmp23 tmp25 = tmp24 / tmp7 tmp26 = tmp17 + tmp25 tmp29 = tmp27 + tmp28 tmp31 = tmp29 + tmp30 tmp33 = tmp31 + tmp32 tmp34 = tmp33 / tmp7 tmp35 = tmp26 + tmp34 tmp36 = tmp35 / tmp7 tl.store(out_ptr0 + (x0), tmp36, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, x], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_poi_fused_mean_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class SVIGlobalMeanPool2D(nn.Module):
    """
    Expects
    :param x: [examples, samples, channels, H, W]
    :return: [examples, samples, channels]
    """

    def __init__(self):
        super(SVIGlobalMeanPool2D, self).__init__()

    def forward(self, x):
        x = x.mean(4).mean(3)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
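A quick equivalence check (assumes the SVIGlobalMeanPool2D class above is in scope): chaining mean(4).mean(3) equals a single mean over both spatial dims, since every element carries the same weight.

import torch

pool = SVIGlobalMeanPool2D()
x = torch.rand(4, 4, 4, 4, 4)  # [examples, samples, channels, H, W]
y = pool(x)                    # -> (4, 4, 4)

assert torch.allclose(y, x.mean(dim=(3, 4)))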
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp10 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp30 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = tmp15 / tmp7 tmp17 = tmp8 + tmp16 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp24 = tmp22 + tmp23 tmp25 = tmp24 / tmp7 tmp26 = tmp17 + tmp25 tmp29 = tmp27 + tmp28 tmp31 = tmp29 + tmp30 tmp33 = tmp31 + tmp32 tmp34 = tmp33 / tmp7 tmp35 = tmp26 + tmp34 tmp36 = tmp35 / tmp7 tl.store(out_ptr0 + x0, tmp36, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class SVIGlobalMeanPool2DNew(nn.Module): """ Expects :param x: [examples, samples, channels, H, W] :return: [examples, samples, channels] """ def __init__(self): super(SVIGlobalMeanPool2DNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
SebFar/radial_bnn
SVIGlobalMeanPool2D
false
8,738
[ "MIT" ]
29
2497e5e009409ac910d609850eae27f7cc74cec2
https://github.com/SebFar/radial_bnn/tree/2497e5e009409ac910d609850eae27f7cc74cec2
Attentive
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/p7/cp7bcgmypisbzybkq2wwegzxvgofvozth3exh24yu77qhzvvgn2e.py # Topologically Sorted Source Nodes: [diag], Original ATen: [aten.diag_embed] # Source node to ATen node mapping: # diag => eq, full_default, iota, where # Graph fragment: # %iota : [num_users=2] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%iota, %unsqueeze_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %permute, %full_default), kwargs = {}) triton_poi_fused_diag_embed_0 = async_compile.triton('triton_poi_fused_diag_embed_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_diag_embed_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_diag_embed_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp3 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp0 = x0 tmp1 = x1 tmp2 = tmp0 == tmp1 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tl.store(out_ptr0 + (x2), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [diag], Original ATen: [aten.diag_embed] stream0 = get_raw_stream(0) triton_poi_fused_diag_embed_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [diag, matmul], Original ATen: [aten.diag_embed, aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf0, out=buf1) del buf0 return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Attentive(nn.Module):

    def __init__(self, isize):
        super(Attentive, self).__init__()
        self.w = nn.Parameter(torch.ones(isize))

    def forward(self, x):
        return x @ torch.diag(self.w)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'isize': 4}]
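A small sketch (assumes the Attentive class above is in scope): right-multiplying by torch.diag(w) rescales the last dimension elementwise by w, which is why the compiled version can lower it to a diag_embed followed by a plain mm.

import torch

m = Attentive(isize=4)
with torch.no_grad():
    m.w.copy_(torch.arange(1.0, 5.0))  # w = [1, 2, 3, 4]

x = torch.rand(4, 4, 4, 4)
# x @ diag(w) == elementwise scaling of the last dim by w.
assert torch.allclose(m(x), x * m.w)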
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_diag_embed_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp0 = x0 tmp1 = x1 tmp2 = tmp0 == tmp1 tmp4 = 0.0 tmp5 = tl.where(tmp2, tmp3, tmp4) tl.store(out_ptr0 + x2, tmp5, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_diag_embed_0[grid(16)](primals_1, buf0, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf0, out=buf1) del buf0 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) class AttentiveNew(nn.Module): def __init__(self, isize): super(AttentiveNew, self).__init__() self.w = nn.Parameter(torch.ones(isize)) def forward(self, input_0): primals_1 = self.w primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
SUBLIME-GSL/SUBLIME
Attentive
false
8,739
[ "MIT" ]
19
2c9b193abb3f15ae9bab33815e568010057a5564
https://github.com/SUBLIME-GSL/SUBLIME/tree/2c9b193abb3f15ae9bab33815e568010057a5564
Conv2d_fw
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/gt/cgtkxck7xi4jvt5w42qtfq4jx6nnr2ig32ijc37sud5zoaiejz2w.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_2, %primals_1, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_1, 16, grid=grid(16), stream=stream0) del primals_1 return (buf1, primals_2, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class Conv2d_fw(nn.Conv2d):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
            padding=0, bias=True):
        super(Conv2d_fw, self).__init__(in_channels, out_channels,
            kernel_size, stride=stride, padding=padding, bias=bias)
        self.weight.fast = None
        if self.bias is not None:
            self.bias.fast = None

    def forward(self, x):
        if self.bias is None:
            if self.weight.fast is not None:
                out = F.conv2d(x, self.weight.fast, None,
                    stride=self.stride, padding=self.padding)
            else:
                out = super(Conv2d_fw, self).forward(x)
        elif self.weight.fast is not None and self.bias.fast is not None:
            out = F.conv2d(x, self.weight.fast, self.bias.fast,
                stride=self.stride, padding=self.padding)
        else:
            out = super(Conv2d_fw, self).forward(x)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
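A usage sketch (assumes the Conv2d_fw class above is in scope; the -0.1 shift is an arbitrary stand-in for an inner-loop gradient step): the .fast attributes let MAML-style adaptation swap in updated weights without mutating the registered parameters, and forward routes through F.conv2d whenever both fast tensors are set.

import torch
import torch.nn.functional as F

conv = Conv2d_fw(in_channels=4, out_channels=4, kernel_size=4)
x = torch.rand(4, 4, 4, 4)
y_slow = conv(x)  # no fast weights yet: uses conv.weight / conv.bias

# Attach adapted "fast" weights (placeholder update for illustration).
conv.weight.fast = conv.weight - 0.1
conv.bias.fast = conv.bias - 0.1
y_fast = conv(x)  # now computed from the fast weights

assert torch.allclose(y_fast, F.conv2d(x, conv.weight.fast, conv.bias.fast))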
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(16)](buf1, primals_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 return buf1, primals_2, primals_3 class Conv2d_fwNew(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True): super(Conv2d_fwNew, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias) self.weight.fast = None if self.bias is not None: self.bias.fast = None def forward(self, input_0): primals_2 = self.weight primals_1 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
RongKaiWeskerMA/INSTA
Conv2d_fw
false
8,741
[ "MIT" ]
22
298bec0aeac3c1fde7bbcd4dece72ded1056e478
https://github.com/RongKaiWeskerMA/INSTA/tree/298bec0aeac3c1fde7bbcd4dece72ded1056e478
KLD
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/5w/c5wocyrs7mdwldkxmkbvj64nuv7wciqrjbpn2n2udvwyh65wjjn6.py # Topologically Sorted Source Nodes: [exp, pow_1, add, sub, sub_1, sum_1, kl, kl_1], Original ATen: [aten.exp, aten.pow, aten.add, aten.sub, aten.sum, aten.mul, aten.mean] # Source node to ATen node mapping: # add => add # exp => exp # kl => mul # kl_1 => mean # pow_1 => pow_1 # sub => sub # sub_1 => sub_1 # sum_1 => sum_1 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, %pow_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, 1.0), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %arg0_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub_1, [1]), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 0.5), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul,), kwargs = {}) triton_per_fused_add_exp_mean_mul_pow_sub_sum_0 = async_compile.triton('triton_per_fused_add_exp_mean_mul_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_exp_mean_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_exp_mean_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp2 = tl.load(in_ptr1 + (r0 + (64*r1)), None) tmp8 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp10 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None) tmp16 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp18 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None) tmp24 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp26 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None) tmp1 = tl_math.exp(tmp0) tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp5 = 1.0 tmp6 = tmp4 - tmp5 tmp7 = tmp6 - tmp0 tmp9 = tl_math.exp(tmp8) tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = tmp12 - tmp5 tmp14 = tmp13 - tmp8 tmp15 = tmp7 + tmp14 tmp17 = tl_math.exp(tmp16) tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = tmp20 - tmp5 tmp22 = tmp21 - tmp16 tmp23 = tmp15 + tmp22 tmp25 = tl_math.exp(tmp24) tmp27 = tmp26 * tmp26 tmp28 = tmp25 + tmp27 tmp29 = tmp28 - tmp5 tmp30 = tmp29 - tmp24 tmp31 = tmp23 + tmp30 tmp32 = 0.5 tmp33 = tmp31 * tmp32 tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK]) tmp36 = tl.sum(tmp34, 1)[:, None] tmp37 = 64.0 tmp38 = tmp36 / tmp37 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp38, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [exp, pow_1, add, sub, sub_1, sum_1, kl, kl_1], Original ATen: [aten.exp, aten.pow, aten.add, aten.sub, aten.sum, aten.mul, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_exp_mean_mul_pow_sub_sum_0.run(buf2, arg0_1, arg1_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', 
benchmark_compiled_module)
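# Reading the fused kernel above: the whole eager expression
# 0.5 * sum(exp(logvar) + mu**2 - 1 - logvar, dim=1) followed by torch.mean
# collapses into one persistent reduction. The 4 x 4 x 4 batch/spatial
# positions become rnumel = 64 (r0 = spatial index, r1 = batch index), the
# channel dimension is unrolled into the four load groups at offsets
# 0/16/32/48, and the final division by 64.0 is the batch mean.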
import torch


class KLD(torch.nn.Module):

    def __init__(self, reduction='mean'):
        super(KLD, self).__init__()
        self.reduction = reduction

    def forward(self, mu, logvar, mu_2=None, logvar_2=None):
        """
        Calculate the Kullback-Leibler divergence between two Gaussians
        :param mu: mean of the first Gaussian
        :param logvar: log(var) of the first Gaussian
        :param mu_2: mean of the second Gaussian (default: 0)
        :param logvar_2: log(var) of the second Gaussian (default: unit variance)
        :return: loss value
        """
        if mu_2 is None or logvar_2 is None:
            kl = self.univariate_kl_loss(mu, logvar)
        else:
            kl = self.general_kl_loss(mu, logvar, mu_2, logvar_2)
        if self.reduction == 'mean':
            kl = torch.mean(kl)
        elif self.reduction == 'sum':
            kl = torch.sum(kl)
        else:
            raise NotImplementedError(
                f'reduction method {self.reduction} is not implemented.')
        return kl

    def univariate_kl_loss(self, mu, logvar):
        """
        KL loss between the input and a zero-mean, unit-variance Gaussian
        :param mu: mean of the distribution
        :param logvar: log variance of the distribution
        :return: Kullback-Leibler divergence between distribution and Gaussian
        """
        kl = 0.5 * torch.sum(torch.exp(logvar) + mu ** 2 - 1.0 - logvar, dim=1)
        return kl

    def general_kl_loss(self, mu_1, logvar_1, mu_2, logvar_2):
        """
        KL loss between two distributions
        :param mu_1: mean of the first distribution
        :param logvar_1: log variance of the first distribution
        :param mu_2: mean of the second distribution
        :param logvar_2: log variance of the second distribution
        :return: Kullback-Leibler divergence loss between the two distributions
        """
        kl = logvar_2 - logvar_1 + torch.exp(logvar_1) / torch.exp(logvar_2
            ) + (mu_1 - mu_2) ** 2 / torch.exp(logvar_2) - 1
        kl = 0.5 * torch.sum(kl)
        return kl


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
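For reference, the two branches implement the standard closed-form Gaussian KL divergences (writing $\sigma^2 = e^{\text{logvar}}$), summed over the latent dimension:

$$D_{\mathrm{KL}}\big(\mathcal{N}(\mu, \sigma^2)\,\|\,\mathcal{N}(0, 1)\big) = \tfrac{1}{2}\sum_j \big(\sigma_j^2 + \mu_j^2 - 1 - \log \sigma_j^2\big)$$

$$D_{\mathrm{KL}}\big(\mathcal{N}(\mu_1, \sigma_1^2)\,\|\,\mathcal{N}(\mu_2, \sigma_2^2)\big) = \tfrac{1}{2}\sum_j \Big(\log \sigma_{2,j}^2 - \log \sigma_{1,j}^2 + \frac{\sigma_{1,j}^2 + (\mu_{1,j} - \mu_{2,j})^2}{\sigma_{2,j}^2} - 1\Big)$$

which is exactly what univariate_kl_loss and general_kl_loss compute term by term.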
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_exp_mean_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0,
    in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = rindex // 16
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
    tmp2 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
    tmp8 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
    tmp10 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
    tmp16 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
    tmp18 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
    tmp24 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
    tmp26 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
    tmp1 = tl_math.exp(tmp0)
    tmp3 = tmp2 * tmp2
    tmp4 = tmp1 + tmp3
    tmp5 = 1.0
    tmp6 = tmp4 - tmp5
    tmp7 = tmp6 - tmp0
    tmp9 = tl_math.exp(tmp8)
    tmp11 = tmp10 * tmp10
    tmp12 = tmp9 + tmp11
    tmp13 = tmp12 - tmp5
    tmp14 = tmp13 - tmp8
    tmp15 = tmp7 + tmp14
    tmp17 = tl_math.exp(tmp16)
    tmp19 = tmp18 * tmp18
    tmp20 = tmp17 + tmp19
    tmp21 = tmp20 - tmp5
    tmp22 = tmp21 - tmp16
    tmp23 = tmp15 + tmp22
    tmp25 = tl_math.exp(tmp24)
    tmp27 = tmp26 * tmp26
    tmp28 = tmp25 + tmp27
    tmp29 = tmp28 - tmp5
    tmp30 = tmp29 - tmp24
    tmp31 = tmp23 + tmp30
    tmp32 = 0.5
    tmp33 = tmp31 * tmp32
    tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
    tmp36 = tl.sum(tmp34, 1)[:, None]
    tmp37 = 64.0
    tmp38 = tmp36 / tmp37
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp38, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_per_fused_add_exp_mean_mul_pow_sub_sum_0[grid(1)](buf2,
            arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf2,


class KLDNew(torch.nn.Module):

    def __init__(self, reduction='mean'):
        super(KLDNew, self).__init__()
        self.reduction = reduction

    def univariate_kl_loss(self, mu, logvar):
        """
        KL loss between the input and a zero-mean, unit-variance Gaussian
        :param mu: mean of the distribution
        :param logvar: log variance of the distribution
        :return: Kullback-Leibler divergence between distribution and Gaussian
        """
        kl = 0.5 * torch.sum(torch.exp(logvar) + mu ** 2 - 1.0 - logvar, dim=1)
        return kl

    def general_kl_loss(self, mu_1, logvar_1, mu_2, logvar_2):
        """
        KL loss between two distributions
        :param mu_1: mean of the first distribution
        :param logvar_1: log variance of the first distribution
        :param mu_2: mean of the second distribution
        :param logvar_2: log variance of the second distribution
        :return: Kullback-Leibler divergence loss between the two distributions
        """
        kl = logvar_2 - logvar_1 + torch.exp(logvar_1) / torch.exp(logvar_2
            ) + (mu_1 - mu_2) ** 2 / torch.exp(logvar_2) - 1
        kl = 0.5 * torch.sum(kl)
        return kl

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
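A minimal smoke test for the compiled wrapper, assuming a CUDA device is available. The reference below mirrors the traced graph recorded above (exp is applied to the first argument, the square to the second), so it is written against the call signature rather than the eager KLD.forward argument names:

import torch

def kl_reference(a, b):
    # a plays the logvar role, b the mu role in the traced graph
    return (0.5 * torch.sum(torch.exp(a) + b ** 2 - 1.0 - a, dim=1)).mean()

a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(KLDNew()(a, b), kl_reference(a, b))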
SchubertLab/mvTCR
KLD
false
8,742
[ "MIT" ]
16
d815749e24650f69ef68054e0078d490af91b71d
https://github.com/SchubertLab/mvTCR/tree/d815749e24650f69ef68054e0078d490af91b71d
TCB
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/72/c72d3tkkvywiwcqg6jgisu6yo5ht4ojq47sb3fci6qge6hp3k5zc.py # Topologically Sorted Source Nodes: [conv2d, lateral_out], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # lateral_out => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/e2/ce2gsr4e5ut7lcclkkxhgc4bx2nddp5zgimnhbvcxvbulcsoih5a.py # Topologically Sorted Source Nodes: [conv2d_1, conv_transpose2d, add, out], Original ATen: [aten.convolution, aten.add, aten.relu] # Source node to ATen node mapping: # add => add # conv2d_1 => convolution_1 # conv_transpose2d => convolution_2 # out => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_8, %primals_6, %primals_7, [2, 2], [1, 1], [1, 1], True, [1, 1], 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %convolution_2), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {}) triton_poi_fused_add_convolution_relu_1 = async_compile.triton('triton_poi_fused_add_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_relu_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x3), None) tmp4 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.full([1], 0, tl.int32) tmp8 = 
triton_helpers.maximum(tmp7, tmp6) tl.store(in_out_ptr0 + (x3), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/bi/cbiqc4z2xsax4qhwcessw4ywdncgstkzkhvtksvl74s5xfxhbus4.py # Topologically Sorted Source Nodes: [conv2d_2, out_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_2 => convolution_3 # out_1 => relu_2 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_9, %primals_10, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x3), tmp4, None) tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (256, ), (1, )) assert_size_stride(primals_3, (4, 4, 16, 16), (1024, 256, 16, 1)) assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_5, (256, ), (1, )) 
assert_size_stride(primals_6, (4, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_7, (256, ), (1, )) assert_size_stride(primals_8, (4, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_9, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_10, (256, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 256, 16, 16), (65536, 256, 16, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, lateral_out], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 262144, grid=grid(262144), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 256, 16, 16), (65536, 256, 16, 1)) # Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(primals_8, primals_6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None) assert_size_stride(buf3, (4, 256, 16, 16), (65536, 256, 16, 1)) buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1, conv_transpose2d, add, out], Original ATen: [aten.convolution, aten.add, aten.relu] triton_poi_fused_add_convolution_relu_1.run(buf4, primals_5, buf3, primals_7, 262144, grid=grid(262144), stream=stream0) del buf3 del primals_5 del primals_7 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, primals_9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 256, 16, 16), (65536, 256, 16, 1)) buf6 = buf5; del buf5 # reuse buf7 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_2, out_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_2.run(buf6, primals_10, buf7, 262144, grid=grid(262144), stream=stream0) del primals_10 return (buf6, primals_1, primals_3, primals_4, primals_6, primals_8, primals_9, buf1, buf4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 16, 16), (1024, 256, 16, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 8, 8), (256, 64, 8, 1), device='cuda:0', dtype=torch.float32) primals_9 = 
rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
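# Note on the schedule above: the three Triton kernels cover only the cheap
# pointwise tails (bias + ReLU; bias + skip-add + ReLU; bias + ReLU plus the
# <= 0 mask saved for the ReLU backward). The convolutions themselves are
# dispatched through extern_kernels.convolution to ATen's convolution
# (typically cuDNN on this device).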
import torch
import torch.nn as nn
from itertools import product as product


class TCB(nn.Module):
    """
    Transfer Connection Block Architecture
    This block merges a lateral (forward) feature map with an upsampled
    feature-pyramid map.
    """

    def __init__(self, lateral_channels, channles, internal_channels=256,
        is_batchnorm=False):
        """
        :param lateral_channels: number of forward feature channels
        :param channles: number of pyramid feature channels
        :param internal_channels: number of internal channels
        """
        super(TCB, self).__init__()
        self.is_batchnorm = is_batchnorm
        use_bias = not self.is_batchnorm
        self.conv1 = nn.Conv2d(lateral_channels, internal_channels,
            kernel_size=3, padding=1, bias=use_bias)
        self.conv2 = nn.Conv2d(internal_channels, internal_channels,
            kernel_size=3, padding=1, bias=use_bias)
        self.deconv = nn.ConvTranspose2d(channles, internal_channels,
            kernel_size=3, stride=2, padding=1, output_padding=1,
            bias=use_bias)
        self.conv3 = nn.Conv2d(internal_channels, internal_channels,
            kernel_size=3, padding=1, bias=use_bias)
        self.relu = nn.ReLU(inplace=True)
        if self.is_batchnorm:
            self.bn1 = nn.BatchNorm2d(internal_channels)
            self.bn2 = nn.BatchNorm2d(internal_channels)
            self.deconv_bn = nn.BatchNorm2d(internal_channels)
            self.bn3 = nn.BatchNorm2d(internal_channels)
        self.out_channels = internal_channels

    def forward(self, lateral, x):
        if self.is_batchnorm:
            lateral_out = self.relu(self.bn1(self.conv1(lateral)))
            out = self.relu(self.bn2(self.conv2(lateral_out)) + self.
                deconv_bn(self.deconv(x)))
            out = self.relu(self.bn3(self.conv3(out)))
        else:
            lateral_out = self.relu(self.conv1(lateral))
            out = self.relu(self.conv2(lateral_out) + self.deconv(x))
            out = self.relu(self.conv3(out))
        return out


def get_inputs():
    return [torch.rand([4, 4, 16, 16]), torch.rand([4, 4, 8, 8])]


def get_init_inputs():
    return [[], {'lateral_channels': 4, 'channles': 4}]
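A quick shape check, using the sample shapes from get_inputs/get_init_inputs above, shows why the two inputs line up: the transposed convolution (kernel 3, stride 2, padding 1, output_padding 1) doubles the spatial size, H_out = (H - 1) * 2 - 2 * 1 + 3 + 1 = 2H, so the 8x8 pyramid feature meets the 16x16 lateral feature after upsampling:

import torch

block = TCB(lateral_channels=4, channles=4)  # 'channles' is the source spelling
lateral = torch.rand(4, 4, 16, 16)           # finer, forward/lateral feature
pyramid = torch.rand(4, 4, 8, 8)             # coarser pyramid feature
out = block(lateral, pyramid)
print(out.shape)                             # torch.Size([4, 256, 16, 16])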
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 256 % 256
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_add_convolution_relu_1(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 256 % 256
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x3, None)
    tmp4 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp7 = tl.full([1], 0, tl.int32)
    tmp8 = triton_helpers.maximum(tmp7, tmp6)
    tl.store(in_out_ptr0 + x3, tmp8, None)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0,
    in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 256 % 256
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x3, tmp4, None)
    tl.store(out_ptr0 + x3, tmp6, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10) = args
    args.clear()
    assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (256,), (1,))
    assert_size_stride(primals_3, (4, 4, 16, 16), (1024, 256, 16, 1))
    assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_5, (256,), (1,))
    assert_size_stride(primals_6, (4, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_7, (256,), (1,))
    assert_size_stride(primals_8, (4, 4, 8, 8), (256, 64, 8, 1))
    assert_size_stride(primals_9, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_10, (256,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(262144)](buf1, primals_2,
            262144, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf3 = extern_kernels.convolution(primals_8, primals_6, stride=(2,
            2), padding=(1, 1), dilation=(1, 1), transposed=True,
            output_padding=(1, 1), groups=1, bias=None)
        assert_size_stride(buf3, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf4 = buf2
        del buf2
        triton_poi_fused_add_convolution_relu_1[grid(262144)](buf4,
            primals_5, buf3, primals_7, 262144, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del buf3
        del primals_5
        del primals_7
        buf5 = extern_kernels.convolution(buf4, primals_9, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf6 = buf5
        del buf5
        buf7 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1),
            torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_2[grid(262144)](
            buf6, primals_10, buf7, 262144, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del primals_10
    return (buf6, primals_1, primals_3, primals_4, primals_6, primals_8,
        primals_9, buf1, buf4, buf7)


class TCBNew(nn.Module):
    """
    Transfer Connection Block Architecture
    This block merges a lateral (forward) feature map with an upsampled
    feature-pyramid map.
    """

    def __init__(self, lateral_channels, channles, internal_channels=256,
        is_batchnorm=False):
        """
        :param lateral_channels: number of forward feature channels
        :param channles: number of pyramid feature channels
        :param internal_channels: number of internal channels
        """
        super(TCBNew, self).__init__()
        self.is_batchnorm = is_batchnorm
        use_bias = not self.is_batchnorm
        self.conv1 = nn.Conv2d(lateral_channels, internal_channels,
            kernel_size=3, padding=1, bias=use_bias)
        self.conv2 = nn.Conv2d(internal_channels, internal_channels,
            kernel_size=3, padding=1, bias=use_bias)
        self.deconv = nn.ConvTranspose2d(channles, internal_channels,
            kernel_size=3, stride=2, padding=1, output_padding=1,
            bias=use_bias)
        self.conv3 = nn.Conv2d(internal_channels, internal_channels,
            kernel_size=3, padding=1, bias=use_bias)
        self.relu = nn.ReLU(inplace=True)
        if self.is_batchnorm:
            self.bn1 = nn.BatchNorm2d(internal_channels)
            self.bn2 = nn.BatchNorm2d(internal_channels)
            self.deconv_bn = nn.BatchNorm2d(internal_channels)
            self.bn3 = nn.BatchNorm2d(internal_channels)
        self.out_channels = internal_channels

    def forward(self, input_0, input_1):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.deconv.weight
        primals_7 = self.deconv.bias
        primals_9 = self.conv3.weight
        primals_10 = self.conv3.bias
        primals_3 = input_0
        primals_8 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10])
        return output[0]
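One caveat about the compiled module: its forward always routes through the bias-only call path traced above, so it reproduces TCB only for the default is_batchnorm=False (BatchNorm submodules, if constructed, would be silently ignored). A hedged parity sketch, assuming a CUDA device and the fixed input shapes the wrapper asserts:

import torch

torch.manual_seed(0)
ref = TCB(lateral_channels=4, channles=4).cuda()
opt = TCBNew(lateral_channels=4, channles=4).cuda()
opt.load_state_dict(ref.state_dict())  # identical conv/deconv weights
lateral = torch.rand(4, 4, 16, 16, device='cuda')
pyramid = torch.rand(4, 4, 8, 8, device='cuda')
torch.testing.assert_close(ref(lateral, pyramid), opt(lateral, pyramid),
    rtol=1e-4, atol=1e-5)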
SaralaSewwandi/refinedet-pytorch
TCB
false
8,743
[ "MIT" ]
43
d1eb9f84216085858562d816f19aeb77c2ab604a
https://github.com/SaralaSewwandi/refinedet-pytorch/tree/d1eb9f84216085858562d816f19aeb77c2ab604a
resBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/hp/chp7woenoxkphczh7osqnw2qzo4iasy3omqbpgltn246hzll6zuq.py # Topologically Sorted Source Nodes: [conv2d, instance_norm, x], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # instance_norm => add, rsqrt, var_mean # x => relu # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) triton_per_fused__native_batch_norm_legit_convolution_relu_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 
'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_relu_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = tmp2 - tmp12 tmp20 = 16.0 tmp21 = tmp18 / tmp20 tmp22 = 1e-05 tmp23 = tmp21 + tmp22 tmp24 = libdevice.rsqrt(tmp23) tmp25 = tmp19 * tmp24 tmp26 = tl.full([1, 1], 0, tl.int32) tmp27 = triton_helpers.maximum(tmp26, tmp25) tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask) tl.store(out_ptr2 + (r2 + (16*x3)), tmp27, xmask) tl.store(out_ptr3 + (x3), tmp24, xmask) tl.store(out_ptr0 + (x3), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ac/caczstfocojcqatesxqkrbfi6ej5lfenb4lzgtnvwqxzqqeyxutn.py # Topologically Sorted Source Nodes: [conv2d_1, x_1, add, x_2], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.add, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # add => add_2 # conv2d_1 => convolution_1 # x_1 => add_1, rsqrt_1, var_mean_1 # x_2 => relu_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_2,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_per_fused__native_batch_norm_legit_add_convolution_relu_threshold_backward_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_convolution_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor 
from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_add_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr1 + (r2 + (16*x3)), xmask, other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = tmp2 - tmp12 tmp20 = 16.0 tmp21 = tmp18 / tmp20 tmp22 = 1e-05 tmp23 = tmp21 + tmp22 tmp24 = libdevice.rsqrt(tmp23) tmp25 = tmp19 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tl.full([1, 1], 0, tl.int32) tmp29 = triton_helpers.maximum(tmp28, tmp27) tmp30 = 0.0 tmp31 = tmp29 <= tmp30 tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask) tl.store(out_ptr2 + (r2 + (16*x3)), tmp29, xmask) tl.store(out_ptr3 + (r2 + (16*x3)), tmp31, xmask) tl.store(out_ptr4 + (x3), tmp24, xmask) tl.store(out_ptr0 + (x3), tmp12, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) 
assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [conv2d, instance_norm, x], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.relu] stream0 = get_raw_stream(0) triton_per_fused__native_batch_norm_legit_convolution_relu_0.run(buf1, primals_3, buf2, buf6, buf5, 16, 16, grid=grid(16), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(buf6, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1)) buf8 = buf7; del buf7 # reuse buf9 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf12 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [conv2d_1, x_1, add, x_2], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.add, aten.relu, aten.threshold_backward] triton_per_fused__native_batch_norm_legit_add_convolution_relu_threshold_backward_1.run(buf8, primals_5, primals_1, buf9, buf13, buf14, buf12, 16, 16, grid=grid(16), stream=stream0) del primals_5 return (buf13, primals_1, primals_2, primals_4, buf1, reinterpret_tensor(buf5, (16, ), (1, ), 0), buf6, buf8, reinterpret_tensor(buf12, (16, ), (1, ), 0), buf14, reinterpret_tensor(buf9, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
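# Reading the two persistent-reduction kernels above: xnumel = 16 covers the
# 4 samples x 4 channels of the instance norm, and rnumel = 16 is the 4 x 4
# spatial plane each (sample, channel) pair is normalized over. Each kernel
# fuses the conv bias add, the mean/rsqrt statistics, the normalization and
# the ReLU; the second one additionally adds the residual (primals_1) and
# stores the <= 0 mask consumed by the ReLU backward.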
import math
import torch
import torch.nn as nn
import torch.nn.functional as F


class resBlock(nn.Module):

    def __init__(self, channelDepth, windowSize=3):
        super(resBlock, self).__init__()
        padding = math.floor(windowSize / 2)
        self.conv1 = nn.Conv2d(channelDepth, channelDepth, windowSize, 1,
            padding)
        self.conv2 = nn.Conv2d(channelDepth, channelDepth, windowSize, 1,
            padding)
        self.IN_conv = nn.InstanceNorm2d(channelDepth,
            track_running_stats=False, affine=False)

    def forward(self, x):
        res = x
        x = F.relu(self.IN_conv(self.conv1(x)))
        x = self.IN_conv(self.conv2(x))
        x = F.relu(x + res)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'channelDepth': 4}]
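Since IN_conv carries no affine parameters and no running statistics, sharing one InstanceNorm2d between both normalizations is purely functional; under that observation the block reduces to the stateless sketch below (F.instance_norm with its default eps=1e-5 is what InstanceNorm2d calls internally):

import torch
import torch.nn.functional as F

block = resBlock(channelDepth=4)
x = torch.rand(4, 4, 4, 4)
h = F.relu(F.instance_norm(block.conv1(x)))      # first conv + IN + ReLU
y = F.relu(F.instance_norm(block.conv2(h)) + x)  # second conv + IN, residual, ReLU
torch.testing.assert_close(y, block(x))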
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_relu_0(in_out_ptr0,
    in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x3 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp3, 0)
    tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
    tmp8 = tl.where(xmask, tmp6, 0)
    tmp9 = tl.sum(tmp8, 1)[:, None]
    tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
    tmp11 = tmp10.to(tl.float32)
    tmp12 = tmp9 / tmp11
    tmp13 = tmp3 - tmp12
    tmp14 = tmp13 * tmp13
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
    tmp17 = tl.where(xmask, tmp15, 0)
    tmp18 = tl.sum(tmp17, 1)[:, None]
    tmp19 = tmp2 - tmp12
    tmp20 = 16.0
    tmp21 = tmp18 / tmp20
    tmp22 = 1e-05
    tmp23 = tmp21 + tmp22
    tmp24 = libdevice.rsqrt(tmp23)
    tmp25 = tmp19 * tmp24
    tmp26 = tl.full([1, 1], 0, tl.int32)
    tmp27 = triton_helpers.maximum(tmp26, tmp25)
    tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
    tl.store(out_ptr2 + (r2 + 16 * x3), tmp27, xmask)
    tl.store(out_ptr3 + x3, tmp24, xmask)
    tl.store(out_ptr0 + x3, tmp12, xmask)


@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_relu_threshold_backward_1(
    in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, out_ptr4,
    xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x3 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr1 + (r2 + 16 * x3), xmask, other=0.0)
    tmp2 = tmp0 + tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp3, 0)
    tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
    tmp8 = tl.where(xmask, tmp6, 0)
    tmp9 = tl.sum(tmp8, 1)[:, None]
    tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
    tmp11 = tmp10.to(tl.float32)
    tmp12 = tmp9 / tmp11
    tmp13 = tmp3 - tmp12
    tmp14 = tmp13 * tmp13
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
    tmp17 = tl.where(xmask, tmp15, 0)
    tmp18 = tl.sum(tmp17, 1)[:, None]
    tmp19 = tmp2 - tmp12
    tmp20 = 16.0
    tmp21 = tmp18 / tmp20
    tmp22 = 1e-05
    tmp23 = tmp21 + tmp22
    tmp24 = libdevice.rsqrt(tmp23)
    tmp25 = tmp19 * tmp24
    tmp27 = tmp25 + tmp26
    tmp28 = tl.full([1, 1], 0, tl.int32)
    tmp29 = triton_helpers.maximum(tmp28, tmp27)
    tmp30 = 0.0
    tmp31 = tmp29 <= tmp30
    tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
    tl.store(out_ptr2 + (r2 + 16 * x3), tmp29, xmask)
    tl.store(out_ptr3 + (r2 + 16 * x3), tmp31, xmask)
    tl.store(out_ptr4 + x3, tmp24, xmask)
    tl.store(out_ptr0 + x3, tmp12, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0
        del buf0
        buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16),
            torch.float32)
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16),
            torch.float32)
        get_raw_stream(0)
        triton_per_fused__native_batch_norm_legit_convolution_relu_0[grid(16)](
            buf1, primals_3, buf2, buf6, buf5, 16, 16, XBLOCK=8,
            num_warps=2, num_stages=1)
        del primals_3
        buf7 = extern_kernels.convolution(buf6, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
        buf8 = buf7
        del buf7
        buf9 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16),
            torch.float32)
        buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        buf12 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16),
            torch.float32)
        triton_per_fused__native_batch_norm_legit_add_convolution_relu_threshold_backward_1[
            grid(16)](buf8, primals_5, primals_1, buf9, buf13, buf14,
            buf12, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
        del primals_5
    return (buf13, primals_1, primals_2, primals_4, buf1,
        reinterpret_tensor(buf5, (16,), (1,), 0), buf6, buf8,
        reinterpret_tensor(buf12, (16,), (1,), 0), buf14,
        reinterpret_tensor(buf9, (1, 16, 1, 1), (16, 1, 1, 1), 0),
        reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0))


class resBlockNew(nn.Module):

    def __init__(self, channelDepth, windowSize=3):
        super(resBlockNew, self).__init__()
        padding = math.floor(windowSize / 2)
        self.conv1 = nn.Conv2d(channelDepth, channelDepth, windowSize, 1,
            padding)
        self.conv2 = nn.Conv2d(channelDepth, channelDepth, windowSize, 1,
            padding)
        self.IN_conv = nn.InstanceNorm2d(channelDepth,
            track_running_stats=False, affine=False)

    def forward(self, input_0):
        primals_2 = self.conv1.weight
        primals_3 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
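As with the other wrappers, call() returns the activation first and then the buffers saved for autograd, and forward keeps only output[0]. A hedged CUDA smoke test, assuming a GPU is present:

import torch

torch.manual_seed(0)
ref = resBlock(channelDepth=4).cuda()
opt = resBlockNew(channelDepth=4).cuda()
opt.load_state_dict(ref.state_dict())  # same conv weights/biases
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(ref(x), opt(x), rtol=1e-4, atol=1e-5)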
SeokjaeLIM/DSLR-release
resBlock
false
8,744
[ "Apache-2.0" ]
14
861429482faf50ee3d6570948af8c48df1fc7f43
https://github.com/SeokjaeLIM/DSLR-release/tree/861429482faf50ee3d6570948af8c48df1fc7f43
TransformerEncoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/24/c24qsgijonbiqjcskkesmr6djddhrqjlc6pskdyvv3cj26t4733k.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : 
tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/wk/cwkmrcckbwmqrnn75bcrj6x53nm4p3l2vitrgxgtbfaftyuxfsme.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/vd/cvdmt5jqtkl26upubzg5v4fgenbinv4id2jwmzw4lx3n3g5oeutp.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/zl/czlqk3zraa2s6rxtxkrdczoils3dsupq3gczxyfy2d7gczk7zfe3.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] # Source node to ATen node 
mapping: # matmul => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/25/c25jehyb2kwed7xq7lc3i4pphzroyhb7qjcs55wtc2k45bzxw6kz.py # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_1 => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/vm/cvmxc36ua3qvi5sfzotzrqa4y5ilb4k4ptxzsfqpgxth35vrel6k.py # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 
'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/dq/cdqntcymln4qdkhe6gvvdsmb76qsnxodwahqvlhhiapm6v3oepxb.py # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul_1 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (8 + y0 + (12*x2) + 
(48*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/rb/crbg4ppq53vvjejpxahbyutypksm57ramujfio3wyfmhkcvytz5j.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_1 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_9,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/5i/c5i5whyfnlmmdjdjlswewjrqx3jiavt7l2vj26iprli4o4orjnoh.py # Topologically Sorted Source Nodes: [x_1, src, src_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # src => add_3 # src_1 => var_mean_1 # x_1 => add_2 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {}) # %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (1)) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (2)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (3)) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + (x0), tmp28, xmask) tl.store(out_ptr1 + (x0), tmp40, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/fi/cfiqg43urfzw3cnwoqneyfrtrtuyhd4kxnjlp3mhxzbnzqbxt25k.py # Topologically Sorted Source Nodes: [x_1, src, src_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # src => add_3 # src_1 => add_4, add_5, mul_3, mul_4, rsqrt_1, sub_2 # x_1 => add_2 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = 
(%view_11, %primals_6), kwargs = {}) # %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_7), kwargs = {}) # %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_8), kwargs = {}) triton_poi_fused_add_native_layer_norm_9 = async_compile.triton('triton_poi_fused_add_native_layer_norm_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/eq/ceqy6gevwwrkitpm7y2bkkfyo3w7zs6r5q65wlgnfiiwguwcfy5s.py # Topologically 
Sorted Source Nodes: [gelu], Original ATen: [aten.gelu] # Source node to ATen node mapping: # gelu => add_6, erf, mul_5, mul_6, mul_7 # Graph fragment: # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_13, 0.5), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_13, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_6,), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %add_6), kwargs = {}) triton_poi_fused_gelu_10 = async_compile.triton('triton_poi_fused_gelu_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/xu/cxuo3ylge4wnwyjdzyeca5ijduhctvd5pc5wr6sqrlhf5jhgp565.py # Topologically Sorted Source Nodes: [src_2], Original ATen: [aten.add] # Source node to ATen node mapping: # src_2 => add_7 # Graph fragment: # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %view_15), kwargs = {}) triton_poi_fused_add_11 = async_compile.triton('triton_poi_fused_add_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (2048, 4), (4, 1)) assert_size_stride(primals_10, (2048, ), (1, )) assert_size_stride(primals_11, (4, 2048), (2048, 1)) assert_size_stride(primals_12, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, grid=grid(64), stream=stream0) del primals_1 del primals_2 buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf3, buf4, 16, 4, grid=grid(16, 4), stream=stream0) buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) # Topologically Sorted Source 
Nodes: [matmul], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf3, buf5, 16, 4, grid=grid(16, 4), stream=stream0) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf6, buf7, 256, grid=grid(256), stream=stream0) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_5.run(buf7, buf8, 256, grid=grid(256), stream=stream0) del buf7 buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone] triton_poi_fused_clone_6.run(buf3, buf9, 16, 4, grid=grid(16, 4), stream=stream0) del buf3 buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] triton_poi_fused_clone_7.run(buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12) buf13 = buf1; del buf1 # reuse buf14 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_1, src, src_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_8.run(primals_3, buf12, primals_6, buf13, buf14, 16, grid=grid(16), stream=stream0) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1, src, src_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_9.run(primals_3, buf12, primals_6, buf13, buf14, primals_7, primals_8, buf15, 64, grid=grid(64), stream=stream0) del buf13 del buf14 del primals_8 buf16 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 2048), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_10 buf17 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.float32) # Topologically Sorted Source Nodes: [gelu], Original ATen: [aten.gelu] triton_poi_fused_gelu_10.run(buf16, buf17, 32768, grid=grid(32768), stream=stream0) buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf17, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_11, (2048, 4), (1, 2048), 0), out=buf18) buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0); del buf18 # reuse # Topologically Sorted Source Nodes: [src_2], Original ATen: [aten.add] triton_poi_fused_add_11.run(buf19, buf15, primals_12, 64, 
grid=grid(64), stream=stream0) del primals_12 return (buf19, primals_3, primals_6, primals_7, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), buf16, reinterpret_tensor(buf17, (16, 2048), (2048, 1), 0), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
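# The kernel pair triton_poi_fused__softmax_4 / triton_poi_fused__softmax_5
# above implements a numerically stable softmax in two passes over the last
# dimension: the first pass subtracts the per-row maximum and exponentiates,
# the second divides by the per-row sum. A minimal eager sketch of the same
# split for reference; the name two_pass_softmax is illustrative and not part
# of the generated module:
import torch

def two_pass_softmax(scores: torch.Tensor) -> torch.Tensor:
    # pass 1 (mirrors _softmax_4): exp(x - max(x)) along the last dim
    numer = (scores - scores.amax(dim=-1, keepdim=True)).exp()
    # pass 2 (mirrors _softmax_5): normalize by the per-row sum
    return numer / numer.sum(dim=-1, keepdim=True)

# For the (4, 4, 4, 4) attention scores in this graph, this agrees with
# torch.softmax(scores, dim=-1).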
from torch.nn import Module import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Linear from torch.nn import Dropout from torch.nn import LayerNorm from torch.nn import Identity def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False): """ Obtained from: github.com:rwightman/pytorch-image-models Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x. device) random_tensor.floor_() output = x.div(keep_prob) * random_tensor return output class DropPath(nn.Module): """ Obtained from: github.com:rwightman/pytorch-image-models Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class Attention(Module): """ Obtained from timm: github.com:rwightman/pytorch-image-models """ def __init__(self, dim, num_heads=8, attention_dropout=0.1, projection_dropout=0.1): super().__init__() self.num_heads = num_heads head_dim = dim // self.num_heads self.scale = head_dim ** -0.5 self.qkv = Linear(dim, dim * 3, bias=False) self.attn_drop = Dropout(attention_dropout) self.proj = Linear(dim, dim) self.proj_drop = Dropout(projection_dropout) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class TransformerEncoderLayer(Module): """ Inspired by torch.nn.TransformerEncoderLayer and timm. """ def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, attention_dropout=0.1, drop_path_rate=0.1): super(TransformerEncoderLayer, self).__init__() self.pre_norm = LayerNorm(d_model) self.self_attn = Attention(dim=d_model, num_heads=nhead, attention_dropout=attention_dropout, projection_dropout=dropout) self.linear1 = Linear(d_model, dim_feedforward) self.dropout1 = Dropout(dropout) self.norm1 = LayerNorm(d_model) self.linear2 = Linear(dim_feedforward, d_model) self.dropout2 = Dropout(dropout) self.drop_path = DropPath(drop_path_rate ) if drop_path_rate > 0 else Identity() self.activation = F.gelu def forward(self, src: 'torch.Tensor', *args, **kwargs) ->torch.Tensor: src = src + self.drop_path(self.self_attn(self.pre_norm(src))) src = self.norm1(src) src2 = self.linear2(self.dropout1(self.activation(self.linear1(src)))) src = src + self.drop_path(self.dropout2(src2)) return src def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'nhead': 4}]
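# The clone kernels above (triton_poi_fused_clone_2/3/6) read the fused qkv
# projection at element offsets 0, 4 and 8 within a per-token row stride of
# 12, i.e. they slice the Linear(dim, 3 * dim) output into q, k and v. A
# short eager sketch of that layout, using the d_model=4, nhead=4
# configuration from get_init_inputs(); variable names are illustrative:
import torch

B, N, C, H = 4, 4, 4, 4              # batch, tokens, dim, heads
qkv = torch.randn(B, N, 3 * C)       # fused projection output, row stride 12
qkv = qkv.reshape(B, N, 3, H, C // H).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]     # per-token offsets 0 / 4 / 8
assert q.shape == (B, H, N, C // H)  # (4, 4, 4, 1), matching the clone
                                     # buffers buf4 and buf9 in call()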
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch.nn import Module import torch.nn as nn import torch.nn.functional as F from torch.nn import Linear from torch.nn import Dropout from torch.nn import LayerNorm from torch.nn import Identity assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + 1) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr2 + 2) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr2 + 3) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp40, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + 
tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, None) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (2048, 4), (4, 1)) assert_size_stride(primals_10, (2048,), (1,)) assert_size_stride(primals_11, (4, 2048), (2048, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(16, 4)](buf3, buf5, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused__softmax_5[grid(256)](buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf7 buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_6[grid(16, 4)](buf3, buf9, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf3 buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_7[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0) del buf10 extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), 
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12) buf13 = buf1 del buf1 buf14 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_3, buf12, primals_6, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_9[grid(64)](primals_3, buf12, primals_6, buf13, buf14, primals_7, primals_8, buf15, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf13 del buf14 del primals_8 buf16 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32) extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 2048), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_10 buf17 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.float32 ) triton_poi_fused_gelu_10[grid(32768)](buf16, buf17, 32768, XBLOCK= 256, num_warps=4, num_stages=1) buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf17, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_11, (2048, 4), (1, 2048), 0), out=buf18) buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0) del buf18 triton_poi_fused_add_11[grid(64)](buf19, buf15, primals_12, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 return buf19, primals_3, primals_6, primals_7, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0 ), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0 ), buf16, reinterpret_tensor(buf17, (16, 2048), (2048, 1), 0 ), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4 def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False): """ Obtained from: github.com:rwightman/pytorch-image-models Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x. device) random_tensor.floor_() output = x.div(keep_prob) * random_tensor return output class DropPath(nn.Module): """ Obtained from: github.com:rwightman/pytorch-image-models Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
""" def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class Attention(Module): """ Obtained from timm: github.com:rwightman/pytorch-image-models """ def __init__(self, dim, num_heads=8, attention_dropout=0.1, projection_dropout=0.1): super().__init__() self.num_heads = num_heads head_dim = dim // self.num_heads self.scale = head_dim ** -0.5 self.qkv = Linear(dim, dim * 3, bias=False) self.attn_drop = Dropout(attention_dropout) self.proj = Linear(dim, dim) self.proj_drop = Dropout(projection_dropout) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class TransformerEncoderLayerNew(Module): """ Inspired by torch.nn.TransformerEncoderLayer and timm. """ def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, attention_dropout=0.1, drop_path_rate=0.1): super(TransformerEncoderLayerNew, self).__init__() self.pre_norm = LayerNorm(d_model) self.self_attn = Attention(dim=d_model, num_heads=nhead, attention_dropout=attention_dropout, projection_dropout=dropout) self.linear1 = Linear(d_model, dim_feedforward) self.dropout1 = Dropout(dropout) self.norm1 = LayerNorm(d_model) self.linear2 = Linear(dim_feedforward, d_model) self.dropout2 = Dropout(dropout) self.drop_path = DropPath(drop_path_rate ) if drop_path_rate > 0 else Identity() self.activation = F.gelu def forward(self, input_0): primals_1 = self.pre_norm.weight primals_2 = self.pre_norm.bias primals_4 = self.self_attn.qkv.weight primals_5 = self.self_attn.proj.weight primals_6 = self.self_attn.proj.bias primals_9 = self.linear1.weight primals_10 = self.linear1.bias primals_7 = self.norm1.weight primals_8 = self.norm1.bias primals_11 = self.linear2.weight primals_12 = self.linear2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
RongKaiWeskerMA/INSTA
TransformerEncoderLayer
false
8745
[ "MIT" ]
22
298bec0aeac3c1fde7bbcd4dece72ded1056e478
https://github.com/RongKaiWeskerMA/INSTA/tree/298bec0aeac3c1fde7bbcd4dece72ded1056e478
Blockdown
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/tc/ctcofj6pm4dy4kxxnh4c5owlcpvwuzx3bzarslfstbmlcpcbahz3.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x => convolution # x_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/al/calxi7m5xdvqhadp3ebswq7umqz6gdsdeypeymifpoporwc3swk2.py # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_2 => convolution_1 # x_3 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x3), tmp4, xmask) tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 
16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 2, 2), (16, 4, 2, 1)) buf3 = buf2; del buf2 # reuse buf4 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_1.run(buf3, primals_5, buf4, 64, grid=grid(64), stream=stream0) del primals_5 return (buf3, primals_1, primals_3, primals_4, buf1, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch import torch.nn as nn class conv_bn_relu(nn.Module): def __init__(self, in_channel, out_channel, stride=1, has_relu=True): super(conv_bn_relu, self).__init__() self.conv = nn.Conv2d(in_channel, out_channel, 3, stride=stride, padding=1, bias=True) if has_relu: self.relu = nn.ReLU(inplace=True) else: self.relu = None def forward(self, x): x = self.conv(x) if self.relu: x = self.relu(x) return x class Blockdown(nn.Module): def __init__(self, in_channel, out_channel): super(Blockdown, self).__init__() self.conv1 = conv_bn_relu(in_channel, out_channel, stride=2) self.conv2 = conv_bn_relu(out_channel, out_channel) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 2, 2), (16, 4, 2, 1)) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_1[grid(64)](buf3, primals_5, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1, buf4 class conv_bn_relu(nn.Module): def __init__(self, in_channel, out_channel, stride=1, has_relu=True): super(conv_bn_relu, self).__init__() self.conv = nn.Conv2d(in_channel, out_channel, 3, stride=stride, padding=1, bias=True) if has_relu: self.relu = nn.ReLU(inplace=True) else: self.relu = None def forward(self, x): x = self.conv(x) if self.relu: x = self.relu(x) return x class BlockdownNew(nn.Module): def __init__(self, in_channel, out_channel): super(BlockdownNew, self).__init__() self.conv1 = conv_bn_relu(in_channel, out_channel, stride=2) self.conv2 = conv_bn_relu(out_channel, out_channel) def forward(self, input_0): primals_1 = self.conv1.conv.weight primals_2 = self.conv1.conv.bias 
primals_4 = self.conv2.conv.weight primals_5 = self.conv2.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
SeanChenxy/GAN_RS
Blockdown
false
8,746
[ "BSD-3-Clause" ]
17
a1786b946caf7bd24c83cea4c7a9bb74445cc381
https://github.com/SeanChenxy/GAN_RS/tree/a1786b946caf7bd24c83cea4c7a9bb74445cc381
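A note on validating records like the Blockdown one above: the Inductor wrapper class (BlockdownNew) keeps the eager module's parameter layout, so the two can be run side by side. The following is a minimal sketch, not part of the record; it assumes a CUDA device and that Blockdown, BlockdownNew, get_inputs, and get_init_inputs from the record are in scope.

import torch

# Build eager and compiled modules with identical weights, then compare outputs.
init_args, init_kwargs = get_init_inputs()          # [[], {'in_channel': 4, 'out_channel': 4}]
eager = Blockdown(*init_args, **init_kwargs).cuda()
compiled = BlockdownNew(*init_args, **init_kwargs).cuda()
compiled.load_state_dict(eager.state_dict())        # same conv1/conv2 submodules
x = get_inputs()[0].cuda()                          # the (4, 4, 4, 4) input from the record
with torch.no_grad():
    torch.testing.assert_close(eager(x), compiled(x), rtol=1e-4, atol=1e-4)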
PolicyBasis
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/3a/c3agnuokrhkjgiiuk5dd5lpoxh3xnnknmbsnzetmbks4cbk3lbkm.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_1/inductor_cache/yb/cybscaahlkrxtrsbhfzyu7m2drt5hcckp3iojte2gv3g3iecftql.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze, %primals_5), kwargs = {}) triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_2, buf0, 64, grid=grid(64), stream=stream0) del primals_2 buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm] extern_kernels.mm(primals_1, reinterpret_tensor(buf0, (4, 16), (16, 1), 0), out=buf1) del buf0 buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (4, 4, 1), (4, 1, 1), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # 
Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_4, (4, 4, 1), (4, 1, 1), 0), out=buf3) buf4 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] triton_poi_fused_add_1.run(buf4, primals_5, 16, grid=grid(16), stream=stream0) del primals_5 buf5 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add] triton_poi_fused_add_1.run(buf5, primals_6, 16, grid=grid(16), stream=stream0) del primals_6 return (buf4, buf5, reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_4, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(primals_3, (4, 1, 4), (4, 1, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn class PolicyBasis(nn.Module): def __init__(self, action_num, state_dim, task_dim): super(PolicyBasis, self).__init__() self.state_dim = state_dim self.task_dim = task_dim self.action_num = action_num self.weight_mu = nn.Parameter(torch.Tensor(action_num, state_dim, task_dim)) self.policy_bias_mu = nn.Parameter(torch.Tensor(action_num)) self.reward_bias_mu = nn.Parameter(torch.Tensor(action_num)) self.reset_parameters() def forward(self, input1, input2, input3): N = input1.size(0) state_action_feat = torch.mm(input1, self.weight_mu.transpose(1, 0) .contiguous().view(self.state_dim, self.action_num * self.task_dim) ).view(N, self.action_num, self.task_dim) output1 = torch.bmm(state_action_feat, input2.unsqueeze(2)).squeeze(2) output2 = torch.bmm(state_action_feat, input3.unsqueeze(2)).squeeze(2) return (output1 + self.policy_bias_mu, output2 + self. reward_bias_mu, state_action_feat) def reset_parameters(self): mu_range = 1 / np.sqrt(self.state_dim * self.task_dim * self.action_num ) self.weight_mu.data.uniform_(-mu_range, mu_range) self.policy_bias_mu.data.fill_(0) self.reward_bias_mu.data.fill_(0) def __repr__(self): return (self.__class__.__name__ + '(state_features={}, task_features={}, action_num={})'.format( self.state_dim, self.task_dim, self.action_num)) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'action_num': 4, 'state_dim': 4, 'task_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(buf0, (4, 16), (16, 1), 0), out=buf1) del buf0 buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (4, 4, 1), (4, 1, 1), 0), out =buf2) buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_4, (4, 4, 1), (4, 1, 1), 0), out =buf3) buf4 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0) del buf2 triton_poi_fused_add_1[grid(16)](buf4, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0) del buf3 triton_poi_fused_add_1[grid(16)](buf5, primals_6, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_6 return buf4, buf5, reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_4, (4, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(primals_3, (4, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0) class PolicyBasisNew(nn.Module): def __init__(self, action_num, state_dim, task_dim): super(PolicyBasisNew, self).__init__() self.state_dim = state_dim self.task_dim = task_dim self.action_num = action_num self.weight_mu = nn.Parameter(torch.Tensor(action_num, state_dim, task_dim)) self.policy_bias_mu = nn.Parameter(torch.Tensor(action_num)) self.reward_bias_mu = nn.Parameter(torch.Tensor(action_num)) self.reset_parameters() def reset_parameters(self): mu_range = 1 / np.sqrt(self.state_dim * self.task_dim * self.action_num ) 
self.weight_mu.data.uniform_(-mu_range, mu_range) self.policy_bias_mu.data.fill_(0) self.reward_bias_mu.data.fill_(0) def __repr__(self): return (self.__class__.__name__ + '(state_features={}, task_features={}, action_num={})'.format( self.state_dim, self.task_dim, self.action_num)) def forward(self, input_0, input_1, input_2): primals_2 = self.weight_mu primals_5 = self.policy_bias_mu primals_6 = self.reward_bias_mu primals_1 = input_0 primals_3 = input_1 primals_4 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1], output[2]
Sha-Lab/SynPo
PolicyBasis
false
8,747
[ "MIT" ]
18
8ac35a01d2c810187b9c14b914bcb792ed73caa9
https://github.com/Sha-Lab/SynPo/tree/8ac35a01d2c810187b9c14b914bcb792ed73caa9
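For the PolicyBasis record, the clone kernel's index arithmetic (x0 + 4*x2 + 16*x1) materializes weight_mu.transpose(1, 0) contiguously so that a single mm over a (state_dim, action_num * task_dim) view replaces a per-action loop; the two bmm calls then contract the task embeddings. A self-contained shape-level check with random tensors (the einsum is a reference formulation of this sketch, not anything from the record):

import torch

N, A, S, T = 4, 4, 4, 4                                  # batch, action_num, state_dim, task_dim
W = torch.randn(A, S, T)                                 # stands in for weight_mu
s, task = torch.randn(N, S), torch.randn(N, T)
# The contiguous transpose that triton_poi_fused_clone_0 writes out:
feat = (s @ W.transpose(1, 0).contiguous().view(S, A * T)).view(N, A, T)
scores = torch.bmm(feat, task.unsqueeze(2)).squeeze(2)   # (N, A), as in output1/output2
torch.testing.assert_close(scores, torch.einsum('ns,ast,nt->na', s, W, task),
                           rtol=1e-4, atol=1e-5)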
C3D
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/zy/czycdvaiww55zxm2ry37fc7sdtfas6zwqqvnlngogsnneijkr4m4.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_1, %primals_2, [1, 1, 1], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 64) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn class C3D(nn.Module): def __init__(self, inplanes, planes): super(C3D, self).__init__() self.c3d = nn.Conv3d(inplanes, planes, kernel_size=3, padding=1) def forward(self, x): x = self.c3d(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0) class C3DNew(nn.Module): def __init__(self, inplanes, planes): super(C3DNew, self).__init__() self.c3d = nn.Conv3d(inplanes, planes, kernel_size=3, padding=1) def forward(self, input_0): primals_1 = self.c3d.weight primals_2 = self.c3d.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Schmiddo/d2conv3d
C3D
false
8,748
[ "MIT" ]
16
9b330be56f0dfb9657a63e3fb3394ab36b35a67b
https://github.com/Schmiddo/d2conv3d/tree/9b330be56f0dfb9657a63e3fb3394ab36b35a67b
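The C3D record shows how the wrapper handles an unbatched input: the graph feeds reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), ...) into the convolution and views the result back to four dimensions, which is the same batch-dimension bookkeeping nn.Conv3d performs for a (C, D, H, W) input in recent PyTorch versions. A CPU-only sketch of that equivalence, with random data rather than anything from the record:

import torch
import torch.nn as nn

conv = nn.Conv3d(4, 4, kernel_size=3, padding=1)
x = torch.rand(4, 4, 4, 4)                     # unbatched (C, D, H, W), as in get_inputs()
y_unbatched = conv(x)                          # Conv3d accepts the unbatched layout directly
y_batched = conv(x.unsqueeze(0)).squeeze(0)    # what the (1, 4, 4, 4, 4) reinterpret expresses
torch.testing.assert_close(y_unbatched, y_batched)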
BCELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/oy/coygg6k5dphbfsegfs7u72u2br6cqghmp5n4rpcizvjdngy54boq.py # Topologically Sorted Source Nodes: [binary_cross_entropy], Original ATen: [aten.binary_cross_entropy] # Source node to ATen node mapping: # binary_cross_entropy => full_default, full_default_1, log, log1p, maximum, maximum_1, mean, mul, mul_1, neg, sub, sub_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, 1), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %maximum), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg0_1,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %maximum_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_1,), kwargs = {}) triton_per_fused_binary_cross_entropy_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_binary_cross_entropy_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = -tmp3 tmp5 = libdevice.log1p(tmp4) tmp6 = -100.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp2 * tmp7 tmp9 = tl_math.log(tmp3) tmp10 = triton_helpers.maximum(tmp9, tmp6) tmp11 = tmp0 * tmp10 tmp12 = tmp8 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [binary_cross_entropy], Original ATen: [aten.binary_cross_entropy] stream0 = get_raw_stream(0) triton_per_fused_binary_cross_entropy_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.functional as F import torchvision.transforms.functional as F from torch.nn import functional as F import torch.cuda def binary_cross_entropy(inputs, target, weight=None, reduction='mean', smooth_eps=None, from_logits=False): """cross entropy loss, with support for label smoothing https://arxiv.org/abs/1512.00567""" smooth_eps = smooth_eps or 0 if smooth_eps > 0: target = target.float() target.add_(smooth_eps).div_(2.0) if from_logits: return F.binary_cross_entropy_with_logits(inputs, target, weight= weight, reduction=reduction) else: return F.binary_cross_entropy(inputs, target, weight=weight, reduction=reduction) class BCELoss(nn.BCELoss): def __init__(self, weight=None, size_average=None, reduce=None, reduction='mean', smooth_eps=None, from_logits=False): super(BCELoss, self).__init__(weight, size_average, reduce, reduction) self.smooth_eps = smooth_eps self.from_logits = from_logits def forward(self, input, target): return binary_cross_entropy(input, target, weight=self.weight, reduction=self.reduction, smooth_eps=self.smooth_eps, from_logits=self.from_logits) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn import torch.nn.functional as F import torchvision.transforms.functional as F from torch.nn import functional as F import torch.cuda assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = -tmp3 tmp5 = libdevice.log1p(tmp4) tmp6 = -100.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp2 * tmp7 tmp9 = tl_math.log(tmp3) tmp10 = triton_helpers.maximum(tmp9, tmp6) tmp11 = tmp0 * tmp10 tmp12 = tmp8 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def binary_cross_entropy(inputs, target, weight=None, reduction='mean', smooth_eps=None, from_logits=False): """cross entropy loss, with support for label smoothing https://arxiv.org/abs/1512.00567""" smooth_eps = smooth_eps or 0 if smooth_eps > 0: target = target.float() target.add_(smooth_eps).div_(2.0) if from_logits: return F.binary_cross_entropy_with_logits(inputs, target, weight= weight, reduction=reduction) else: return F.binary_cross_entropy(inputs, target, weight=weight, reduction=reduction) class BCELossNew(nn.BCELoss): def __init__(self, weight=None, size_average=None, reduce=None, reduction='mean', smooth_eps=None, from_logits=False): super(BCELossNew, self).__init__(weight, size_average, reduce, reduction) self.smooth_eps = smooth_eps self.from_logits = from_logits def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
RichardScottOZ/sota-data-augmentation-and-optimizers
BCELoss
false
8,749
[ "MIT" ]
31
60128ca762ac2864a3b54c43c36d1d5aa2033e5a
https://github.com/RichardScottOZ/sota-data-augmentation-and-optimizers/tree/60128ca762ac2864a3b54c43c36d1d5aa2033e5a
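The BCELoss record's persistent reduction folds PyTorch's clamping of the log terms (bounded below at -100, visible as tmp6/tmp7 and tmp10 in the kernel) into a single pass over all 256 elements. One way to sanity-check that fused arithmetic against the eager op, using made-up tensors:

import torch
import torch.nn.functional as F

p, t = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)   # predictions and targets
# Elementwise expression computed by triton_per_fused_binary_cross_entropy_0,
# followed by the mean over all elements:
manual = ((t - 1) * torch.clamp(torch.log1p(-p), min=-100)
          - t * torch.clamp(torch.log(p), min=-100)).mean()
torch.testing.assert_close(manual, F.binary_cross_entropy(p, t))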
NB
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/yh/cyh2ll5mee5wqmmcim72iqhtmld7a2fc75dktzhdl3yvdq3o3fte.py # Topologically Sorted Source Nodes: [add_2, log_1, add, add_1, log_theta_mu_eps, sub, mul, add_3, log_2, sub_1, mul_1, add_4, add_5, lgamma, add_6, lgamma_1, sub_2, add_7, lgamma_2, res, res_1], Original ATen: [aten.add, aten.log, aten.sub, aten.mul, aten.lgamma, aten.mean] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # add_3 => add_3 # add_4 => add_4 # add_5 => add_5 # add_6 => add_6 # add_7 => add_7 # lgamma => lgamma # lgamma_1 => lgamma_1 # lgamma_2 => lgamma_2 # log_1 => log_1 # log_2 => log_2 # log_theta_mu_eps => log # mul => mul # mul_1 => mul_1 # res => sub_3 # res_1 => mean # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-08), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_2,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 1e-08), kwargs = {}) # %log : [num_users=2] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_1, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %sub), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, 1e-08), kwargs = {}) # %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_3,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log_2, %log), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %sub_1), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg2_1, %arg0_1), kwargs = {}) # %lgamma : [num_users=1] = 
call_function[target=torch.ops.aten.lgamma.default](args = (%add_5,), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %lgamma), kwargs = {}) # %lgamma_1 : [num_users=1] = call_function[target=torch.ops.aten.lgamma.default](args = (%arg0_1,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_6, %lgamma_1), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg2_1, 1), kwargs = {}) # %lgamma_2 : [num_users=1] = call_function[target=torch.ops.aten.lgamma.default](args = (%add_7,), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %lgamma_2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_3,), kwargs = {}) triton_per_fused_add_lgamma_log_mean_mul_sub_0 = async_compile.triton('triton_per_fused_add_lgamma_log_mean_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_lgamma_log_mean_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_lgamma_log_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp4 = tl.load(in_ptr1 + (r0), None) tmp10 = tl.load(in_ptr2 + (r0), None) tmp1 = 1e-08 tmp2 = tmp0 + tmp1 tmp3 = tl_math.log(tmp2) tmp5 = tmp0 + tmp4 tmp6 = tmp5 + tmp1 tmp7 = tl_math.log(tmp6) tmp8 = tmp3 - tmp7 tmp9 = tmp0 * tmp8 tmp11 = tmp4 + tmp1 tmp12 = tl_math.log(tmp11) tmp13 = tmp12 - tmp7 tmp14 = tmp10 * tmp13 tmp15 = tmp9 + tmp14 tmp16 = tmp10 + tmp0 tmp17 = libdevice.lgamma(tmp16) tmp18 = tmp15 + tmp17 tmp19 = libdevice.lgamma(tmp0) tmp20 = tmp18 - tmp19 tmp21 = 1.0 tmp22 = tmp10 + tmp21 tmp23 = libdevice.lgamma(tmp22) tmp24 = tmp20 - tmp23 tmp25 = tl.broadcast_to(tmp24, [RBLOCK]) tmp27 = 
triton_helpers.promote_to_tensor(tl.sum(tmp25, 0)) tmp28 = 256.0 tmp29 = tmp27 / tmp28 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp29, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [add_2, log_1, add, add_1, log_theta_mu_eps, sub, mul, add_3, log_2, sub_1, mul_1, add_4, add_5, lgamma, add_6, lgamma_1, sub_2, add_7, lgamma_2, res, res_1], Original ATen: [aten.add, aten.log, aten.sub, aten.mul, aten.lgamma, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_lgamma_log_mean_mul_sub_0.run(buf1, arg0_1, arg1_1, arg2_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class NB(torch.nn.Module): """ Yang Comment: Usage in forward: x : Ground truth mu: Prediction theta: Another trainable parameter with shape=[xdim(number of count variables)], simply initialize a nn.Parameter(torch.randn(xdim)) in the model Be careful, we need the negative of the returned value: loss = -NBLoss """ def __init__(self, eps=1e-08, reduction='mean'): super(NB, self).__init__() self.eps = eps self.reduction = reduction def forward(self, x: 'torch.Tensor', mu: 'torch.Tensor', theta: 'torch.Tensor'): if theta.ndimension() == 1: theta = theta.view(1, theta.size(0)) log_theta_mu_eps = torch.log(theta + mu + self.eps) res = theta * (torch.log(theta + self.eps) - log_theta_mu_eps) + x * ( torch.log(mu + self.eps) - log_theta_mu_eps) + torch.lgamma(x + theta) - torch.lgamma(theta) - torch.lgamma(x + 1) if self.reduction == 'mean': res = torch.mean(res) elif self.reduction == 'sum': res = torch.sum(res) else: raise NotImplementedError( f'reduction method {self.reduction} is not implemented.') return res def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_lgamma_log_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp4 = tl.load(in_ptr1 + r0, None) tmp10 = tl.load(in_ptr2 + r0, None) tmp1 = 1e-08 tmp2 = tmp0 + tmp1 tmp3 = tl_math.log(tmp2) tmp5 = tmp0 + tmp4 tmp6 = tmp5 + tmp1 tmp7 = tl_math.log(tmp6) tmp8 = tmp3 - tmp7 tmp9 = tmp0 * tmp8 tmp11 = tmp4 + tmp1 tmp12 = tl_math.log(tmp11) tmp13 = tmp12 - tmp7 tmp14 = tmp10 * tmp13 tmp15 = tmp9 + tmp14 tmp16 = tmp10 + tmp0 tmp17 = libdevice.lgamma(tmp16) tmp18 = tmp15 + tmp17 tmp19 = libdevice.lgamma(tmp0) tmp20 = tmp18 - tmp19 tmp21 = 1.0 tmp22 = tmp10 + tmp21 tmp23 = libdevice.lgamma(tmp22) tmp24 = tmp20 - tmp23 tmp25 = tl.broadcast_to(tmp24, [RBLOCK]) tmp27 = triton_helpers.promote_to_tensor(tl.sum(tmp25, 0)) tmp28 = 256.0 tmp29 = tmp27 / tmp28 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp29, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_lgamma_log_mean_mul_sub_0[grid(1)](buf1, arg0_1, arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf1, class NBNew(torch.nn.Module): """ Yang Comment: Usage in forward: x : Ground truth mu: Prediction theta: Another trainable parameter with shape=[xdim(number of count variables)], simply initialize a nn.Parameter(torch.randn(xdim)) in the model Be careful, we need the negative of the returned value: loss = -NBLoss """ def __init__(self, eps=1e-08, reduction='mean'): super(NBNew, self).__init__() self.eps = eps self.reduction = reduction def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
SchubertLab/mvTCR
NB
false
8,750
[ "MIT" ]
16
d815749e24650f69ef68054e0078d490af91b71d
https://github.com/SchubertLab/mvTCR/tree/d815749e24650f69ef68054e0078d490af91b71d
MinibatchStd
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/2j/c2jpyslnqqzvzecm5aym7d3dkyyo3ywqbq6ta7pdce34bjktk5wx.py # Topologically Sorted Source Nodes: [mean, y_2, x], Original ATen: [aten.mean, aten.sub, aten.cat] # Source node to ATen node mapping: # mean => mean # x => cat # y_2 => sub # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [0], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %mean), kwargs = {}) # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_1, %expand], 1), kwargs = {}) # %copy_ : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%arg0_1, %view_1), kwargs = {}) triton_poi_fused_cat_mean_sub_0 = async_compile.triton('triton_poi_fused_cat_mean_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_mean_sub_0', 'mutated_arg_names': ['in_ptr0', 'out_ptr2'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_mean_sub_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) tl.store(out_ptr1 + (x0 + (80*x1)), tmp10, xmask) tl.store(out_ptr2 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/lk/clkg6qsda66vnr5yv4figcu6jxv6zjendnzt3sfds4nv4vkwgz7j.py # Topologically Sorted Source Nodes: [y_5, x], Original ATen: [aten.mean, aten.cat] # Source node to ATen node mapping: # x => cat # y_5 => mean_2 # Graph fragment: # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_3, [-1]), kwargs = {}) # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_1, %expand], 1), kwargs = {}) triton_per_fused_cat_mean_1 = async_compile.triton('triton_per_fused_cat_mean_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cat_mean_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_cat_mean_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r0), None) tmp2 = tl.load(in_ptr0 + (64 + r0), None) tmp5 = tl.load(in_ptr0 + (128 + r0), None) tmp8 = tl.load(in_ptr0 + (192 + r0), None) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 
+ tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = 4.0 tmp12 = tmp10 / tmp11 tmp13 = 1e-08 tmp14 = tmp12 + tmp13 tmp15 = libdevice.sqrt(tmp14) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.sum(tmp16, 1)[:, None] tmp19 = 64.0 tmp20 = tmp18 / tmp19 tl.store(out_ptr1 + (tl.broadcast_to(r1 + (80*r2), [XBLOCK, RBLOCK])), tmp20, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 256, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf4, (4, 4, 4, 4), (80, 16, 4, 1), 0) # alias # Topologically Sorted Source Nodes: [mean, y_2, x], Original ATen: [aten.mean, aten.sub, aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_mean_sub_0.run(arg0_1, buf0, buf2, arg0_1, 256, grid=grid(256), stream=stream0) del arg0_1 buf3 = reinterpret_tensor(buf4, (4, 1, 4, 4), (80, 16, 4, 1), 64) # alias # Topologically Sorted Source Nodes: [y_5, x], Original ATen: [aten.mean, aten.cat] triton_per_fused_cat_mean_1.run(buf0, buf3, 1, 64, grid=grid(1), stream=stream0) del buf0 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.tensorboard import torch.nn class MinibatchStd(nn.Module): """ Adds the average std of each data point over a slice of the minibatch to that slice as a new feature map. This gives an output with one extra channel. Arguments: group_size (int): Number of entries in each slice of the batch. If <= 0, the entire batch is used. Default value is 4. eps (float): Epsilon value added for numerical stability. Default value is 1e-8. """ def __init__(self, group_size=4, eps=1e-08, *args, **kwargs): super(MinibatchStd, self).__init__() if group_size is None or group_size <= 0: group_size = 0 assert group_size != 1, 'Can not use 1 as minibatch std group size.' self.group_size = group_size self.eps = eps def forward(self, input, **kwargs): """ Add a new feature map to the input containing the average standard deviation for each slice. Arguments: input (torch.Tensor) Returns: output (torch.Tensor) """ group_size = self.group_size or input.size(0) assert input.size(0 ) >= group_size, 'Can not use a smaller batch size ' + '({}) than the specified '.format( input.size(0)) + 'group size ({}) '.format(group_size ) + 'of this minibatch std layer.' assert input.size(0 ) % group_size == 0, 'Can not use a batch of a size ' + '({}) that is not '.format( input.size(0)) + 'evenly divisible by the group size ({})'.format( group_size) x = input y = input.view(group_size, -1, *input.size()[1:]) y = y.float() y -= y.mean(dim=0, keepdim=True) y = torch.mean(y ** 2, dim=0) y = torch.sqrt(y + self.eps) y = torch.mean(y.view(y.size(0), -1), dim=-1) y = y.view(-1, *([1] * (input.dim() - 1))) y = y.repeat(group_size, *([1] * (y.dim() - 1))) y = y.expand(y.size(0), 1, *x.size()[2:]) x = torch.cat([x, y], dim=1) return x def extra_repr(self): return 'group_size={}'.format(self.group_size or '-1') def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
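A short usage sketch, assuming the MinibatchStd class above is in scope; the input shape is illustrative. Two behaviors are visible in forward: the batch size must be divisible by group_size, and for float32 inputs y = y.float() returns the same tensor, so the in-place centering also centers the input itself:

import torch

layer = MinibatchStd(group_size=4)
x = torch.rand(4, 3, 16, 16)                  # one group of 4 samples
out = layer(x)
print(out.shape)                              # torch.Size([4, 4, 16, 16]) -- one extra channel
# With a single group, every sample receives the same appended std map:
assert torch.allclose(out[0, -1], out[1, -1])
# Side effect: x has been mean-centered in place by the forward pass.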
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.tensorboard import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_mean_sub_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) tl.store(out_ptr1 + (x0 + 80 * x1), tmp10, xmask) tl.store(out_ptr2 + x2, tmp10, xmask) @triton.jit def triton_per_fused_cat_mean_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr0 + (64 + r0), None) tmp5 = tl.load(in_ptr0 + (128 + r0), None) tmp8 = tl.load(in_ptr0 + (192 + r0), None) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = 4.0 tmp12 = tmp10 / tmp11 tmp13 = 1e-08 tmp14 = tmp12 + tmp13 tmp15 = libdevice.sqrt(tmp14) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.sum(tmp16, 1)[:, None] tmp19 = 64.0 tmp20 = tmp18 / tmp19 tl.store(out_ptr1 + tl.broadcast_to(r1 + 80 * r2, [XBLOCK, RBLOCK]), tmp20, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 256, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf4, (4, 4, 4, 4), (80, 16, 4, 1), 0) get_raw_stream(0) triton_poi_fused_cat_mean_sub_0[grid(256)](arg0_1, buf0, buf2, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 buf3 = reinterpret_tensor(buf4, (4, 1, 4, 4), (80, 16, 4, 1), 64) triton_per_fused_cat_mean_1[grid(1)](buf0, buf3, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf0 return buf4, class MinibatchStdNew(nn.Module): """ Adds the average std of each data point over a slice of the minibatch to that slice as a new feature map. This gives an output with one extra channel. Arguments: group_size (int): Number of entries in each slice of the batch. If <= 0, the entire batch is used. Default value is 4. eps (float): Epsilon value added for numerical stability. Default value is 1e-8.
""" def __init__(self, group_size=4, eps=1e-08, *args, **kwargs): super(MinibatchStdNew, self).__init__() if group_size is None or group_size <= 0: group_size = 0 assert group_size != 1, 'Can not use 1 as minibatch std group size.' self.group_size = group_size self.eps = eps def extra_repr(self): return 'group_size={}'.format(self.group_size or '-1') def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Klanly/StyleFlowPytorch
MinibatchStd
false
8,751
[ "MIT" ]
24
4552108ea1de69e9e9c027909738bbc755ab5cf6
https://github.com/Klanly/StyleFlowPytorch/tree/4552108ea1de69e9e9c027909738bbc755ab5cf6
ScaledDotProductAttention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/bo/cbojn46mcilnpi2xhmnsfqtphuo55gwue2ef62wdctez2do3etb3.py # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_2 => exp_1 # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 4), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) # %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {}) # %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [2], True), kwargs = {}) # %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {}) # %div_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 4), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) tl.store(out_ptr1 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/yh/cyhf6bhaqimi2pucos5fnrpvhrt4vuaetbxnooyr5pvgjt7s6fgo.py # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_2 => div_1, sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [2], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 
) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/nt/cnt7zvmu3yotfer3nokgaqxf26aq66mrpxkg6fsw2n25pe5gcg5k.py # Topologically Sorted Source Nodes: [log_attn], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_attn => exp, log, sub_1, sum_1 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_1, %log), kwargs = {}) triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = 
tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm] extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf0, buf1, buf4, 64, grid=grid(64), stream=stream0) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_attn], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_2.run(buf4, buf5, 64, grid=grid(64), stream=stream0) del buf4 return (buf3, buf2, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class ScaledDotProductAttention(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) self.softmax = nn.Softmax(dim=2) def forward(self, q, k, v): attn = torch.bmm(q, k.transpose(1, 2)) attn = attn / self.temperature log_attn = F.log_softmax(attn, 2) attn = self.softmax(attn) attn = self.dropout(attn) output = torch.bmm(attn, v) return output, attn, log_attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'temperature': 4}]
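A usage sketch for the attention module above. Setting temperature to sqrt(d_k) follows the usual scaled dot-product convention (the harness's get_init_inputs simply passes 4), and eval() disables dropout so the returned attention rows sum to 1:

import math
import torch

batch, seq_len, d_k = 4, 4, 4
attn_layer = ScaledDotProductAttention(temperature=math.sqrt(d_k))
attn_layer.eval()                                        # disable dropout for a deterministic check
q, k, v = (torch.rand(batch, seq_len, d_k) for _ in range(3))
output, attn, log_attn = attn_layer(q, k, v)
print(output.shape)                                      # torch.Size([4, 4, 4])
print(attn.sum(dim=2))                                   # each row sums to 1
print(torch.allclose(attn.log(), log_attn, atol=1e-06))  # log_attn is the log of the softmax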
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf0, buf1, buf4, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__log_softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf4 return buf3, buf2, buf5 class ScaledDotProductAttentionNew(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) self.softmax = nn.Softmax(dim=2) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1], output[2]
Sha-Lab/CASTLE
ScaledDotProductAttention
false
8,752
[ "MIT" ]
13
212cb7aaad1bfae7041c90143220286bde24db33
https://github.com/Sha-Lab/CASTLE/tree/212cb7aaad1bfae7041c90143220286bde24db33
Smooth_loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/wl/cwl5ist2kzqaxx2f4xhoq3vo7lkczwnfdactx3vhtrjoslgmnfkd.py # Topologically Sorted Source Nodes: [sub, pow_1, h_tv, sub_1, pow_2, w_tv, add, truediv], Original ATen: [aten.sub, aten.pow, aten.mean, aten.add, aten.div] # Source node to ATen node mapping: # add => add # h_tv => mean # pow_1 => pow_1 # pow_2 => pow_2 # sub => sub # sub_1 => sub_1 # truediv => div # w_tv => mean_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_3, %slice_7), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_12, %slice_16), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_2,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mean_1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 2), kwargs = {}) triton_per_fused_add_div_mean_pow_sub_0 = async_compile.triton('triton_per_fused_add_div_mean_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_pow_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = (rindex // 4) % 4 r4 = rindex r0 = rindex % 4 tmp0 = 1 + r1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (tl.broadcast_to(4 + r4, [RBLOCK])), tmp5, other=0.0) tmp7 = (-1) + r1 tmp8 = tmp7 >= tmp1 tmp9 = tmp7 < tmp3 tmp10 = tmp8 & tmp9 tmp11 = tl.load(in_ptr0 + (tl.broadcast_to((-4) + r4, [RBLOCK])), tmp10, other=0.0) tmp12 = tmp6 - tmp11 tmp13 = tmp12 * tmp12 tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = 1 + r0 tmp18 = tmp17 >= tmp1 tmp19 = tmp17 < tmp3 tmp20 = tmp18 & tmp19 tmp21 = tl.load(in_ptr0 + (tl.broadcast_to(1 + r4, [RBLOCK])), tmp20, other=0.0) tmp22 = (-1) + r0 tmp23 = tmp22 >= tmp1 tmp24 = tmp22 < tmp3 tmp25 = tmp23 & tmp24 tmp26 = tl.load(in_ptr0 + (tl.broadcast_to((-1) + r4, [RBLOCK])), tmp25, other=0.0) tmp27 = tmp21 - tmp26 tmp28 = tmp27 * tmp27 tmp29 = tl.broadcast_to(tmp28, [RBLOCK]) tmp31 = triton_helpers.promote_to_tensor(tl.sum(tmp29, 0)) tmp32 = 256.0 tmp33 = tmp16 / tmp32 tmp34 = tmp31 / tmp32 tmp35 = tmp33 + tmp34 tmp36 = 0.5 tmp37 = tmp35 * tmp36 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp37, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, pow_1, h_tv, sub_1, pow_2, w_tv, add, truediv], Original ATen: [aten.sub, aten.pow, aten.mean, aten.add, aten.div] stream0 = get_raw_stream(0) triton_per_fused_add_div_mean_pow_sub_0.run(buf2, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class Smooth_loss(nn.Module): def __init__(self, Smooth_weight=1): super(Smooth_loss, self).__init__() self.Smooth_weight = Smooth_weight def forward(self, x): _b, _c, h, w = x.size() x_h = F.pad(x, (0, 0, 1, 1)) h_tv = torch.mean(torch.pow(x_h[:, :, 2:, :] - x_h[:, :, :h, :], 2)) x_w = F.pad(x, (1, 1, 0, 0)) w_tv = torch.mean(torch.pow(x_w[:, :, :, 2:] - x_w[:, :, :, :w], 2)) self.loss = (h_tv + w_tv) / 2 return self.loss def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
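A brief sketch of the smoothness penalty above, assuming Smooth_loss is in scope; shapes are illustrative. Because of the zero padding, h_tv and w_tv compare pixels two steps apart and also pick up fixed boundary terms, so even a constant image yields a small nonzero loss:

import torch

criterion = Smooth_loss()
x = torch.rand(2, 3, 16, 16, requires_grad=True)
loss = criterion(x)
loss.backward()                              # differentiable, so it can regularize a generated image
print(loss.item())
# A smooth horizontal ramp almost always scores lower than random noise:
ramp = torch.linspace(0, 1, 16).expand(2, 3, 16, 16)
print(criterion(ramp).item() < criterion(torch.rand(2, 3, 16, 16)).item())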
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mean_pow_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex // 4 % 4 r4 = rindex r0 = rindex % 4 tmp0 = 1 + r1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + tl.broadcast_to(4 + r4, [RBLOCK]), tmp5, other=0.0 ) tmp7 = -1 + r1 tmp8 = tmp7 >= tmp1 tmp9 = tmp7 < tmp3 tmp10 = tmp8 & tmp9 tmp11 = tl.load(in_ptr0 + tl.broadcast_to(-4 + r4, [RBLOCK]), tmp10, other=0.0) tmp12 = tmp6 - tmp11 tmp13 = tmp12 * tmp12 tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = 1 + r0 tmp18 = tmp17 >= tmp1 tmp19 = tmp17 < tmp3 tmp20 = tmp18 & tmp19 tmp21 = tl.load(in_ptr0 + tl.broadcast_to(1 + r4, [RBLOCK]), tmp20, other=0.0) tmp22 = -1 + r0 tmp23 = tmp22 >= tmp1 tmp24 = tmp22 < tmp3 tmp25 = tmp23 & tmp24 tmp26 = tl.load(in_ptr0 + tl.broadcast_to(-1 + r4, [RBLOCK]), tmp25, other=0.0) tmp27 = tmp21 - tmp26 tmp28 = tmp27 * tmp27 tmp29 = tl.broadcast_to(tmp28, [RBLOCK]) tmp31 = triton_helpers.promote_to_tensor(tl.sum(tmp29, 0)) tmp32 = 256.0 tmp33 = tmp16 / tmp32 tmp34 = tmp31 / tmp32 tmp35 = tmp33 + tmp34 tmp36 = 0.5 tmp37 = tmp35 * tmp36 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp37, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_mean_pow_sub_0[grid(1)](buf2, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf2, class Smooth_lossNew(nn.Module): def __init__(self, Smooth_weight=1): super(Smooth_lossNew, self).__init__() self.Smooth_weight = Smooth_weight def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
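The generated wrapper can be sanity-checked against the eager module. This sketch assumes both classes above are importable and that a CUDA device is available, since the Triton kernel is GPU-only and call() asserts the exact (4, 4, 4, 4) contiguous shape it was traced with:

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')    # shape fixed by assert_size_stride in call()
    eager_loss = Smooth_loss()(x)
    fused_loss = Smooth_lossNew()(x)
    print(torch.allclose(eager_loss, fused_loss, atol=1e-06))   # expected: True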
SeokjaeLIM/DSLR-release
Smooth_loss
false
8,753
[ "Apache-2.0" ]
14
861429482faf50ee3d6570948af8c48df1fc7f43
https://github.com/SeokjaeLIM/DSLR-release/tree/861429482faf50ee3d6570948af8c48df1fc7f43
EncoderCNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/4m/c4mcarghhikyuoixort2auhhvktolbfre37asm6l7rnaltlf2s4u.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 8 y1 = (yindex // 8) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (8*x2) + (128*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_1/inductor_cache/wl/cwlspyajtbqyjxcwpv7patze6a5tqyfbiwclnjg2ydxgnpjgcomw.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 32 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 8 y1 = (yindex // 8) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (8*x2) + (32768*y1)), tmp0, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ch/cchvtnveahpbef57eyebh5xsfar4eodpscnqbem76zlls7g4dtbt.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 10240 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 40 y1 = (yindex // 40) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (40*x2) + (640*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/vd/cvdq3e3rctpnnvcpsqi6ljowjydylaevj43juwya2lkq2gt4x6rm.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 131072 xnumel = 16 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (4096*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ga/cga4qqmcosilvhj57sx2o6k4jmyvhesrnz4ef2jnsmeg27hzsjis.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_4 = 
async_compile.triton('triton_poi_fused_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 524288 xnumel = 16 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = (yindex // 512) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (512*x2) + (8192*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/xt/cxtdntmfzt4pxgg4ax4nwqbr2saif6gvx5tu6mhjtr3mmu6joqnz.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': 
False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/6h/c6hyzpdamaawo2mofyxckf5uc76dns3pw3d5al7dfi3wyq6yjh63.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where, %where_1, %where_2, %where_3, %where_4], 1), kwargs = {}) triton_poi_fused_cat_6 = async_compile.triton('triton_poi_fused_cat_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 163840 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 40 x1 = (xindex // 40) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 8, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((8*x1) + x0), tmp4, eviction_policy='evict_last', other=0.0) tmp6 = 0.0 tmp7 = tmp5 > tmp6 tmp8 = 1.0 tmp9 = tmp5 * tmp8 tmp10 = libdevice.expm1(tmp9) tmp11 = tmp10 * tmp8 tmp12 = tl.where(tmp7, tmp9, tmp11) tmp13 = 
tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tmp16 = tl.full([1], 16, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tmp15 & tmp17 tmp19 = tl.load(in_ptr1 + ((8*x1) + ((-8) + x0)), tmp18, eviction_policy='evict_last', other=0.0) tmp20 = tmp19 > tmp6 tmp21 = tmp19 * tmp8 tmp22 = libdevice.expm1(tmp21) tmp23 = tmp22 * tmp8 tmp24 = tl.where(tmp20, tmp21, tmp23) tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp18, tmp24, tmp25) tmp27 = tmp0 >= tmp16 tmp28 = tl.full([1], 24, tl.int64) tmp29 = tmp0 < tmp28 tmp30 = tmp27 & tmp29 tmp31 = tl.load(in_ptr2 + ((8*x1) + ((-16) + x0)), tmp30, eviction_policy='evict_last', other=0.0) tmp32 = tmp31 > tmp6 tmp33 = tmp31 * tmp8 tmp34 = libdevice.expm1(tmp33) tmp35 = tmp34 * tmp8 tmp36 = tl.where(tmp32, tmp33, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp30, tmp36, tmp37) tmp39 = tmp0 >= tmp28 tmp40 = tl.full([1], 32, tl.int64) tmp41 = tmp0 < tmp40 tmp42 = tmp39 & tmp41 tmp43 = tl.load(in_ptr3 + ((8*x1) + ((-24) + x0)), tmp42, eviction_policy='evict_last', other=0.0) tmp44 = tmp43 > tmp6 tmp45 = tmp43 * tmp8 tmp46 = libdevice.expm1(tmp45) tmp47 = tmp46 * tmp8 tmp48 = tl.where(tmp44, tmp45, tmp47) tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp42, tmp48, tmp49) tmp51 = tmp0 >= tmp40 tmp52 = tl.full([1], 40, tl.int64) tmp53 = tmp0 < tmp52 tmp54 = tl.load(in_ptr4 + ((8*x1) + ((-32) + x0)), tmp51, eviction_policy='evict_last', other=0.0) tmp55 = tmp54 > tmp6 tmp56 = tmp54 * tmp8 tmp57 = libdevice.expm1(tmp56) tmp58 = tmp57 * tmp8 tmp59 = tl.where(tmp55, tmp56, tmp58) tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype) tmp61 = tl.where(tmp51, tmp59, tmp60) tmp62 = tl.where(tmp42, tmp50, tmp61) tmp63 = tl.where(tmp30, tmp38, tmp62) tmp64 = tl.where(tmp18, tmp26, tmp63) tmp65 = tl.where(tmp4, tmp14, tmp64) tl.store(out_ptr0 + (x2), tmp65, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/vj/cvjjtqkvuaimcvmfmqshxjakjxckfp5zdb4eja5sx6ncidpg3myx.py # Topologically Sorted Source Nodes: [conv2d_5, x_1], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d_5 => convolution_5 # x_1 => expm1_5, gt_5, mul_15, mul_17, where_5 # Graph fragment: # %convolution_5 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_12, %primals_13, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_5, 0), kwargs = {}) # %mul_15 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_5, 1.0), kwargs = {}) # %expm1_5 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_15,), kwargs = {}) # %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_5, 1.0), kwargs = {}) # %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %mul_15, %mul_17), kwargs = {}) triton_poi_fused_convolution_elu_7 = async_compile.triton('triton_poi_fused_convolution_elu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + (x2), tmp2, None) tl.store(out_ptr0 + (x2), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ws/cwsnognqaoy6k7trwgilh4cusquy2v3bpu4jlrjeyz2bjvzn2mwr.py # Topologically Sorted Source Nodes: [conv2d_6, x_2], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d_6 => convolution_6 # x_2 => expm1_6, gt_6, mul_18, mul_20, where_6 # Graph fragment: # %convolution_6 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_5, %primals_14, %primals_15, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_6, 0), kwargs = {}) # %mul_18 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_6, 1.0), kwargs = {}) # %expm1_6 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_18,), kwargs = {}) # %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_6, 1.0), kwargs = {}) # %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %mul_18, %mul_20), kwargs = {}) triton_poi_fused_convolution_elu_8 = async_compile.triton('triton_poi_fused_convolution_elu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 
'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_8(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + (x2), tmp2, None) tl.store(out_ptr0 + (x2), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/xr/cxrrysiyglkcq7eoy2ingqrapathub6xg6g5dxm5ck3l2rmd4dxm.py # Topologically Sorted Source Nodes: [conv2d_7, x_3], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d_7 => convolution_7 # x_3 => expm1_7, gt_7, mul_21, mul_23, where_7 # Graph fragment: # %convolution_7 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_6, %primals_16, %primals_17, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_7 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_7, 0), kwargs = {}) # %mul_21 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_7, 1.0), kwargs = {}) # %expm1_7 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_21,), kwargs = {}) # %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_7, 1.0), kwargs = {}) # %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_7, %mul_21, %mul_23), kwargs = {}) triton_poi_fused_convolution_elu_9 = async_compile.triton('triton_poi_fused_convolution_elu_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 
'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_9(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + (x2), tmp2, None) tl.store(out_ptr0 + (x2), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/me/cmei55vt5o7njhvbzn6oh7wwejyrgvxzinpwsvxwvbaiyts5ctqo.py # Topologically Sorted Source Nodes: [avg_pool2d, x_4], Original ATen: [aten.avg_pool2d, aten.squeeze] # Source node to ATen node mapping: # avg_pool2d => avg_pool2d # x_4 => squeeze # Graph fragment: # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_7, [2, 2]), kwargs = {}) # %squeeze : [num_users=1] = call_function[target=torch.ops.aten.squeeze.default](args = (%avg_pool2d,), kwargs = {}) triton_poi_fused_avg_pool2d_squeeze_10 = async_compile.triton('triton_poi_fused_avg_pool2d_squeeze_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 1024], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_squeeze_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_squeeze_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 1024 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:, None] xmask = xindex < xnumel x3 = xindex y0 = yindex % 2 y4 = (yindex // 2) y2 = (yindex // 4) y5 = yindex % 4 tmp0 = tl.load(in_ptr0 + (x3 + (2048*y0) + (8192*y4)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1024 + x3 + (2048*y0) + (8192*y4)), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4096 + x3 + (2048*y0) + (8192*y4)), xmask & ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (5120 + x3 + (2048*y0) + (8192*y4)), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (y5 + (4*x3) + (4096*y2)), tmp8, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args args.clear() assert_size_stride(primals_1, (8, 8, 4, 4), (128, 16, 4, 1)) assert_size_stride(primals_2, (8, ), (1, )) assert_size_stride(primals_3, (4, 8, 64, 64), (32768, 4096, 64, 1)) assert_size_stride(primals_4, (8, 8, 4, 4), (128, 16, 4, 1)) assert_size_stride(primals_5, (8, ), (1, )) assert_size_stride(primals_6, (8, 8, 4, 4), (128, 16, 4, 1)) assert_size_stride(primals_7, (8, ), (1, )) assert_size_stride(primals_8, (8, 8, 4, 4), (128, 16, 4, 1)) assert_size_stride(primals_9, (8, ), (1, )) assert_size_stride(primals_10, (8, 8, 4, 4), (128, 16, 4, 1)) assert_size_stride(primals_11, (8, ), (1, )) assert_size_stride(primals_12, (256, 40, 4, 4), (640, 16, 4, 1)) assert_size_stride(primals_13, (256, ), (1, )) assert_size_stride(primals_14, (512, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_15, (512, ), (1, )) assert_size_stride(primals_16, (1024, 512, 4, 4), (8192, 16, 4, 1)) assert_size_stride(primals_17, (1024, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 8, 4, 4), (128, 1, 32, 8), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 64, 16, grid=grid(64, 16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 8, 64, 64), (32768, 1, 512, 8), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 32, 4096, grid=grid(32, 4096), stream=stream0) del primals_3 buf2 = empty_strided_cuda((8, 8, 4, 4), (128, 1, 32, 8), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(primals_4, buf2, 64, 16, grid=grid(64, 16), stream=stream0) del primals_4 buf3 = empty_strided_cuda((8, 8, 4, 4), (128, 1, 32, 8), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(primals_6, buf3, 64, 16, grid=grid(64, 16), stream=stream0) del primals_6 buf4 = empty_strided_cuda((8, 8, 4, 4), (128, 1, 32, 8), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(primals_8, buf4, 64, 16, grid=grid(64, 16), stream=stream0) del primals_8 buf5 = empty_strided_cuda((8, 8, 4, 4), (128, 1, 32, 8), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(primals_10, buf5, 64, 16, grid=grid(64, 16), stream=stream0) del primals_10 buf6 = empty_strided_cuda((256, 40, 4, 4), (640, 1, 160, 40), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_12, buf6, 10240, 16, grid=grid(10240, 16), stream=stream0) del 
primals_12 buf7 = empty_strided_cuda((512, 256, 4, 4), (4096, 1, 1024, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_14, buf7, 131072, 16, grid=grid(131072, 16), stream=stream0) del primals_14 buf8 = empty_strided_cuda((1024, 512, 4, 4), (8192, 1, 2048, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_16, buf8, 524288, 16, grid=grid(524288, 16), stream=stream0) del primals_16 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 8, 32, 32), (8192, 1, 256, 8)) buf10 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] triton_poi_fused_convolution_5.run(buf10, primals_2, 32768, grid=grid(32768), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf11 = extern_kernels.convolution(buf1, buf2, stride=(2, 2), padding=(3, 3), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 8, 32, 32), (8192, 1, 256, 8)) buf12 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_5.run(buf12, primals_5, 32768, grid=grid(32768), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf13 = extern_kernels.convolution(buf1, buf3, stride=(2, 2), padding=(6, 6), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 8, 32, 32), (8192, 1, 256, 8)) buf14 = buf13; del buf13 # reuse # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_5.run(buf14, primals_7, 32768, grid=grid(32768), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf15 = extern_kernels.convolution(buf1, buf4, stride=(2, 2), padding=(12, 12), dilation=(8, 8), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 8, 32, 32), (8192, 1, 256, 8)) buf16 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] triton_poi_fused_convolution_5.run(buf16, primals_9, 32768, grid=grid(32768), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf17 = extern_kernels.convolution(buf1, buf5, stride=(2, 2), padding=(24, 24), dilation=(16, 16), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 8, 32, 32), (8192, 1, 256, 8)) buf18 = buf17; del buf17 # reuse # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] triton_poi_fused_convolution_5.run(buf18, primals_11, 32768, grid=grid(32768), stream=stream0) del primals_11 buf19 = empty_strided_cuda((4, 40, 32, 32), (40960, 1, 1280, 40), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] triton_poi_fused_cat_6.run(buf10, buf12, buf14, buf16, buf18, buf19, 163840, grid=grid(163840), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf20 = extern_kernels.convolution(buf19, buf6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf21 = buf20; del buf20 # reuse buf22 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) # Topologically Sorted Source Nodes: [conv2d_5, x_1], Original ATen: [aten.convolution, aten.elu] triton_poi_fused_convolution_elu_7.run(buf21, primals_13, buf22, 262144, grid=grid(262144), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf23 = extern_kernels.convolution(buf22, buf7, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf24 = buf23; del buf23 # reuse buf25 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) # Topologically Sorted Source Nodes: [conv2d_6, x_2], Original ATen: [aten.convolution, aten.elu] triton_poi_fused_convolution_elu_8.run(buf24, primals_15, buf25, 131072, grid=grid(131072), stream=stream0) del primals_15 # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution] buf26 = extern_kernels.convolution(buf25, buf8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf27 = buf26; del buf26 # reuse buf28 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) # Topologically Sorted Source Nodes: [conv2d_7, x_3], Original ATen: [aten.convolution, aten.elu] triton_poi_fused_convolution_elu_9.run(buf27, primals_17, buf28, 65536, grid=grid(65536), stream=stream0) del primals_17 buf29 = empty_strided_cuda((4, 1024, 2, 2), (4096, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [avg_pool2d, x_4], Original ATen: [aten.avg_pool2d, aten.squeeze] triton_poi_fused_avg_pool2d_squeeze_10.run(buf28, buf29, 16, 1024, grid=grid(16, 1024), stream=stream0) return (buf29, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf10, buf12, buf14, buf16, buf18, buf19, buf21, buf22, buf24, buf25, buf27, buf28, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((8, 8, 4, 4), (128, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8, 64, 64), (32768, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((8, 8, 4, 4), (128, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((8, 8, 4, 4), (128, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((8, 8, 4, 4), (128, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((8, 8, 4, 4), (128, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((256, 40, 4, 4), (640, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((512, 256, 4, 4), (4096, 16, 4, 1), device='cuda:0', 
dtype=torch.float32) primals_15 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((1024, 512, 4, 4), (8192, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
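# The triton_poi_fused_0 .. triton_poi_fused_4 kernels above are pure layout
# transforms: they repack the contiguous NCHW weights and the input into
# channels-last strides (for example, the (8, 8, 4, 4) weights go from strides
# (128, 16, 4, 1) to (128, 1, 32, 8)) so the convolutions run on channels-last
# memory. A minimal eager sketch of the same repacking (variable names are
# ours, not from the generated module):
import torch

w = torch.randn(8, 8, 4, 4)
w_cl = w.contiguous(memory_format=torch.channels_last)
assert w_cl.stride() == (128, 1, 32, 8)

# The fused ELU kernels (triton_poi_fused_cat_6 and
# triton_poi_fused_convolution_elu_7/8/9) all use the decomposition that the
# graph fragments spell out: elu(x) = x for x > 0, else expm1(x * 1.0) * 1.0
# (alpha = 1.0). A minimal sketch of that decomposition:
import torch.nn.functional as F

x = torch.randn(1024)
decomposed = torch.where(x > 0, x * 1.0, torch.expm1(x * 1.0) * 1.0)
assert torch.allclose(decomposed, F.elu(x))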
import torch import torch.nn as nn import torch.nn.functional as F class EncoderCNN(nn.Module): def __init__(self, latent_dim=1024): super(EncoderCNN, self).__init__() self.latent_dim = latent_dim self.conv1_1 = nn.Conv2d(8, 8, 4, stride=2, dilation=1, padding=1) self.conv1_2 = nn.Conv2d(8, 8, 4, stride=2, dilation=2, padding=3) self.conv1_4 = nn.Conv2d(8, 8, 4, stride=2, dilation=4, padding=6) self.conv1_8 = nn.Conv2d(8, 8, 4, stride=2, dilation=8, padding=12) self.conv1_16 = nn.Conv2d(8, 8, 4, stride=2, dilation=16, padding=24) self.conv2 = nn.Conv2d(8 * 5, self.latent_dim // 4, 4, stride=2, padding=1) self.conv3 = nn.Conv2d(self.latent_dim // 4, self.latent_dim // 2, 4, stride=2, padding=1) self.conv4 = nn.Conv2d(self.latent_dim // 2, self.latent_dim, 4, stride=2, padding=1) def forward(self, x): x1 = F.elu(self.conv1_1(x)) x2 = F.elu(self.conv1_2(x)) x4 = F.elu(self.conv1_4(x)) x8 = F.elu(self.conv1_8(x)) x16 = F.elu(self.conv1_16(x)) x = torch.cat((x1, x2, x4, x8, x16), dim=1) x = F.elu(self.conv2(x)) x = F.elu(self.conv3(x)) x = F.elu(self.conv4(x)) x = F.avg_pool2d(x, 2).squeeze() return x def train_order_block_ids(self): return [[0, 9], [10, 15]] def get_inputs(): return [torch.rand([4, 8, 64, 64])] def get_init_inputs(): return [[], {}]
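# The five conv1_* branches above share kernel size 4 and stride 2 but differ
# in dilation; the paddings (1, 3, 6, 12, 24) are chosen so every branch maps
# the 64x64 input to the same 32x32 grid, which is what makes the channel-wise
# torch.cat valid (5 * 8 = 40 channels). A quick check with the standard conv
# output-size formula (conv_out is a hypothetical helper, not part of the
# module):
def conv_out(size, kernel=4, stride=2, dilation=1, padding=0):
    return (size + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1

for d, p in [(1, 1), (2, 3), (4, 6), (8, 12), (16, 24)]:
    assert conv_out(64, dilation=d, padding=p) == 32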
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 8 y1 = yindex // 8 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 8 * x2 + 128 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 32 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 8 y1 = yindex // 8 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 8 * x2 + 32768 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 40 y1 = yindex // 40 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 40 * x2 + 640 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 256 * x2 + 4096 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 512 * x2 + 8192 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 40 x1 = xindex // 40 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 8, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (8 * x1 + x0), tmp4, eviction_policy= 'evict_last', other=0.0) tmp6 = 0.0 tmp7 = tmp5 > tmp6 tmp8 = 1.0 tmp9 = tmp5 * tmp8 tmp10 = libdevice.expm1(tmp9) tmp11 = tmp10 * tmp8 tmp12 = tl.where(tmp7, tmp9, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tmp16 = tl.full([1], 16, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tmp15 & tmp17 tmp19 = tl.load(in_ptr1 + (8 * x1 + (-8 + x0)), tmp18, eviction_policy= 'evict_last', other=0.0) tmp20 = tmp19 > tmp6 tmp21 = tmp19 * tmp8 tmp22 = libdevice.expm1(tmp21) tmp23 = tmp22 * tmp8 tmp24 = tl.where(tmp20, tmp21, tmp23) tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp18, tmp24, tmp25) tmp27 = tmp0 >= tmp16 tmp28 = tl.full([1], 24, tl.int64) tmp29 = tmp0 < tmp28 tmp30 = tmp27 & tmp29 tmp31 = tl.load(in_ptr2 + (8 * x1 + (-16 + x0)), tmp30, eviction_policy ='evict_last', other=0.0) tmp32 = tmp31 > tmp6 tmp33 = tmp31 * tmp8 tmp34 = libdevice.expm1(tmp33) tmp35 = tmp34 * tmp8 tmp36 = tl.where(tmp32, tmp33, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp30, tmp36, tmp37) tmp39 = tmp0 >= tmp28 tmp40 = tl.full([1], 32, tl.int64) tmp41 = tmp0 < tmp40 tmp42 = tmp39 & tmp41 tmp43 = tl.load(in_ptr3 + (8 * x1 + (-24 + x0)), tmp42, eviction_policy ='evict_last', other=0.0) tmp44 = tmp43 > tmp6 tmp45 = tmp43 * tmp8 tmp46 = libdevice.expm1(tmp45) tmp47 = tmp46 * tmp8 tmp48 = tl.where(tmp44, tmp45, tmp47) tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp42, tmp48, tmp49) tmp51 = tmp0 >= tmp40 tl.full([1], 40, tl.int64) tmp54 = tl.load(in_ptr4 + (8 * x1 + (-32 + x0)), tmp51, eviction_policy ='evict_last', other=0.0) tmp55 = tmp54 > tmp6 tmp56 = tmp54 * tmp8 tmp57 = libdevice.expm1(tmp56) tmp58 = tmp57 * tmp8 tmp59 = tl.where(tmp55, tmp56, tmp58) tmp60 = tl.full(tmp59.shape, 0.0, tmp59.dtype) tmp61 = tl.where(tmp51, tmp59, tmp60) tmp62 = tl.where(tmp42, tmp50, tmp61) tmp63 = tl.where(tmp30, tmp38, tmp62) tmp64 = tl.where(tmp18, tmp26, tmp63) tmp65 = tl.where(tmp4, tmp14, tmp64) tl.store(out_ptr0 + x2, tmp65, None) @triton.jit def triton_poi_fused_convolution_elu_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x2, tmp2, None) tl.store(out_ptr0 + x2, tmp9, None) @triton.jit def triton_poi_fused_convolution_elu_8(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x2, tmp2, None) tl.store(out_ptr0 + x2, tmp9, None) @triton.jit def triton_poi_fused_convolution_elu_9(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x2, tmp2, None) tl.store(out_ptr0 + x2, tmp9, None) @triton.jit def triton_poi_fused_avg_pool2d_squeeze_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 1024 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x3 = xindex y0 = yindex % 2 y4 = yindex // 2 y2 = yindex // 4 y5 = yindex % 4 tmp0 = tl.load(in_ptr0 + (x3 + 2048 * y0 + 8192 * y4), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1024 + x3 + 2048 * y0 + 8192 * y4), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4096 + x3 + 2048 * y0 + 8192 * y4), xmask & ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (5120 + x3 + 2048 * y0 + 8192 * y4), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (y5 + 4 * x3 + 4096 * y2), tmp8, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (8, 8, 4, 4), (128, 16, 4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 8, 64, 64), (32768, 4096, 64, 1)) assert_size_stride(primals_4, (8, 8, 4, 4), (128, 16, 4, 1)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (8, 8, 4, 4), (128, 16, 4, 1)) assert_size_stride(primals_7, (8,), (1,)) assert_size_stride(primals_8, (8, 8, 4, 4), (128, 16, 4, 1)) assert_size_stride(primals_9, (8,), (1,)) assert_size_stride(primals_10, (8, 8, 4, 4), (128, 16, 4, 1)) assert_size_stride(primals_11, (8,), (1,)) assert_size_stride(primals_12, (256, 
40, 4, 4), (640, 16, 4, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (512, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_15, (512,), (1,)) assert_size_stride(primals_16, (1024, 512, 4, 4), (8192, 16, 4, 1)) assert_size_stride(primals_17, (1024,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 8, 4, 4), (128, 1, 32, 8), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(64, 16)](primals_1, buf0, 64, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 8, 64, 64), (32768, 1, 512, 8), torch .float32) triton_poi_fused_1[grid(32, 4096)](primals_3, buf1, 32, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((8, 8, 4, 4), (128, 1, 32, 8), torch.float32) triton_poi_fused_0[grid(64, 16)](primals_4, buf2, 64, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((8, 8, 4, 4), (128, 1, 32, 8), torch.float32) triton_poi_fused_0[grid(64, 16)](primals_6, buf3, 64, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((8, 8, 4, 4), (128, 1, 32, 8), torch.float32) triton_poi_fused_0[grid(64, 16)](primals_8, buf4, 64, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = empty_strided_cuda((8, 8, 4, 4), (128, 1, 32, 8), torch.float32) triton_poi_fused_0[grid(64, 16)](primals_10, buf5, 64, 16, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf6 = empty_strided_cuda((256, 40, 4, 4), (640, 1, 160, 40), torch .float32) triton_poi_fused_2[grid(10240, 16)](primals_12, buf6, 10240, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf7 = empty_strided_cuda((512, 256, 4, 4), (4096, 1, 1024, 256), torch.float32) triton_poi_fused_3[grid(131072, 16)](primals_14, buf7, 131072, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf8 = empty_strided_cuda((1024, 512, 4, 4), (8192, 1, 2048, 512), torch.float32) triton_poi_fused_4[grid(524288, 16)](primals_16, buf8, 524288, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_16 buf9 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 8, 32, 32), (8192, 1, 256, 8)) buf10 = buf9 del buf9 triton_poi_fused_convolution_5[grid(32768)](buf10, primals_2, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf11 = extern_kernels.convolution(buf1, buf2, stride=(2, 2), padding=(3, 3), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 8, 32, 32), (8192, 1, 256, 8)) buf12 = buf11 del buf11 triton_poi_fused_convolution_5[grid(32768)](buf12, primals_5, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf13 = extern_kernels.convolution(buf1, buf3, stride=(2, 2), padding=(6, 6), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 8, 32, 32), (8192, 1, 256, 8)) buf14 = buf13 del buf13 triton_poi_fused_convolution_5[grid(32768)](buf14, primals_7, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf15 = extern_kernels.convolution(buf1, buf4, stride=(2, 2), padding=(12, 12), dilation=(8, 8), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 8, 32, 32), (8192, 1, 256, 8)) buf16 = buf15 del buf15 
triton_poi_fused_convolution_5[grid(32768)](buf16, primals_9, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf17 = extern_kernels.convolution(buf1, buf5, stride=(2, 2), padding=(24, 24), dilation=(16, 16), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 8, 32, 32), (8192, 1, 256, 8)) buf18 = buf17 del buf17 triton_poi_fused_convolution_5[grid(32768)](buf18, primals_11, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_11 buf19 = empty_strided_cuda((4, 40, 32, 32), (40960, 1, 1280, 40), torch.float32) triton_poi_fused_cat_6[grid(163840)](buf10, buf12, buf14, buf16, buf18, buf19, 163840, XBLOCK=512, num_warps=8, num_stages=1) buf20 = extern_kernels.convolution(buf19, buf6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf21 = buf20 del buf20 buf22 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_convolution_elu_7[grid(262144)](buf21, primals_13, buf22, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_13 buf23 = extern_kernels.convolution(buf22, buf7, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf24 = buf23 del buf23 buf25 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_convolution_elu_8[grid(131072)](buf24, primals_15, buf25, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_15 buf26 = extern_kernels.convolution(buf25, buf8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf27 = buf26 del buf26 buf28 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_convolution_elu_9[grid(65536)](buf27, primals_17, buf28, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_17 buf29 = empty_strided_cuda((4, 1024, 2, 2), (4096, 4, 2, 1), torch. 
float32) triton_poi_fused_avg_pool2d_squeeze_10[grid(16, 1024)](buf28, buf29, 16, 1024, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) return (buf29, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf10, buf12, buf14, buf16, buf18, buf19, buf21, buf22, buf24, buf25, buf27, buf28) class EncoderCNNNew(nn.Module): def __init__(self, latent_dim=1024): super(EncoderCNNNew, self).__init__() self.latent_dim = latent_dim self.conv1_1 = nn.Conv2d(8, 8, 4, stride=2, dilation=1, padding=1) self.conv1_2 = nn.Conv2d(8, 8, 4, stride=2, dilation=2, padding=3) self.conv1_4 = nn.Conv2d(8, 8, 4, stride=2, dilation=4, padding=6) self.conv1_8 = nn.Conv2d(8, 8, 4, stride=2, dilation=8, padding=12) self.conv1_16 = nn.Conv2d(8, 8, 4, stride=2, dilation=16, padding=24) self.conv2 = nn.Conv2d(8 * 5, self.latent_dim // 4, 4, stride=2, padding=1) self.conv3 = nn.Conv2d(self.latent_dim // 4, self.latent_dim // 2, 4, stride=2, padding=1) self.conv4 = nn.Conv2d(self.latent_dim // 2, self.latent_dim, 4, stride=2, padding=1) def train_order_block_ids(self): return [[0, 9], [10, 15]] def forward(self, input_0): primals_1 = self.conv1_1.weight primals_2 = self.conv1_1.bias primals_4 = self.conv1_2.weight primals_5 = self.conv1_2.bias primals_6 = self.conv1_4.weight primals_7 = self.conv1_4.bias primals_8 = self.conv1_8.weight primals_9 = self.conv1_8.bias primals_10 = self.conv1_16.weight primals_11 = self.conv1_16.bias primals_12 = self.conv2.weight primals_13 = self.conv2.bias primals_14 = self.conv3.weight primals_15 = self.conv3.bias primals_16 = self.conv4.weight primals_17 = self.conv4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0]
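# A hedged usage sketch for the compiled wrapper above; it needs a CUDA device
# because call() allocates CUDA buffers and launches Triton kernels directly,
# and the kernels are specialized to a (4, 8, 64, 64) input. Note that the
# trailing .squeeze() of the eager model is a no-op here (no dimension of the
# (4, 1024, 2, 2) result is 1), so the compiled graph just returns the pooled
# tensor:
import torch

if torch.cuda.is_available():
    enc = EncoderCNNNew().cuda()
    feats = enc(torch.rand(4, 8, 64, 64, device='cuda'))
    assert feats.shape == (4, 1024, 2, 2)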
SarodYatawatta/federated-pytorch-test
EncoderCNN
false
8754
[ "Apache-2.0" ]
33
42a51ba12a92b32fa19273340d5b61e74e11d8e0
https://github.com/SarodYatawatta/federated-pytorch-test/tree/42a51ba12a92b32fa19273340d5b61e74e11d8e0
SVIGlobalMaxPool2D
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/2b/c2b7nbu2kavt4jud5fa4zbwyiwagifudtqccfefz7uiximgdluk7.py # Topologically Sorted Source Nodes: [max_2], Original ATen: [aten.max] # Source node to ATen node mapping: # max_2 => getitem_2 # Graph fragment: # %getitem_2 : [num_users=1] = call_function[target=operator.getitem](args = (%max_2, 0), kwargs = {}) triton_poi_fused_max_0 = async_compile.triton('triton_poi_fused_max_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 
(16*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp14 = triton_helpers.maximum(tmp6, tmp13) tmp17 = triton_helpers.maximum(tmp15, tmp16) tmp19 = triton_helpers.maximum(tmp17, tmp18) tmp21 = triton_helpers.maximum(tmp19, tmp20) tmp22 = triton_helpers.maximum(tmp14, tmp21) tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp27 = triton_helpers.maximum(tmp25, tmp26) tmp29 = triton_helpers.maximum(tmp27, tmp28) tmp30 = triton_helpers.maximum(tmp22, tmp29) tl.store(out_ptr0 + (x0), tmp30, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [max_2], Original ATen: [aten.max] stream0 = get_raw_stream(0) triton_poi_fused_max_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
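# triton_poi_fused_max_0 above collapses both 4x4 spatial dimensions in one
# pass: each of the 64 output elements (4 examples * 4 samples * 4 channels)
# loads its 16 spatial values and reduces them with a tree of pairwise
# triton_helpers.maximum calls, fusing the eager x.max(4)[0].max(3)[0] into a
# single kernel. A minimal eager sketch of the same reduction (tensor name is
# ours):
import torch

x = torch.rand(4, 4, 4, 4, 4)
assert torch.equal(x.max(4)[0].max(3)[0], torch.amax(x, dim=(3, 4)))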
import torch import torch.nn as nn class SVIGlobalMaxPool2D(nn.Module): """ Expects :param x: [examples, samples, channels, H, W] :return: [examples, samples, channels] """ def __init__(self): super(SVIGlobalMaxPool2D, self).__init__() def forward(self, x): x = x.max(4)[0].max(3)[0] return x def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
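# As the docstring says, the pooling maps [examples, samples, channels, H, W]
# to [examples, samples, channels]. A minimal shape check (a sketch; the sizes
# are arbitrary):
import torch

pool = SVIGlobalMaxPool2D()
assert pool(torch.rand(2, 3, 5, 7, 7)).shape == (2, 3, 5)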
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp8 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp10 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp24 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp14 = triton_helpers.maximum(tmp6, tmp13) tmp17 = triton_helpers.maximum(tmp15, tmp16) tmp19 = triton_helpers.maximum(tmp17, tmp18) tmp21 = triton_helpers.maximum(tmp19, tmp20) tmp22 = triton_helpers.maximum(tmp14, tmp21) tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp27 = triton_helpers.maximum(tmp25, tmp26) tmp29 = triton_helpers.maximum(tmp27, tmp28) tmp30 = triton_helpers.maximum(tmp22, tmp29) tl.store(out_ptr0 + x0, tmp30, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class SVIGlobalMaxPool2DNew(nn.Module): """ Expects :param x: [examples, samples, channels, H, W] :return: [examples, samples, channels] """ def __init__(self): super(SVIGlobalMaxPool2DNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
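# A hedged usage sketch for the compiled version above; the kernel is
# specialized to the (4, 4, 4, 4, 4) case and needs a CUDA device:
import torch

if torch.cuda.is_available():
    pool = SVIGlobalMaxPool2DNew()
    out = pool(torch.rand(4, 4, 4, 4, 4, device='cuda'))
    assert out.shape == (4, 4, 4)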
SebFar/radial_bnn
SVIGlobalMaxPool2D
false
8,755
[ "MIT" ]
29
2497e5e009409ac910d609850eae27f7cc74cec2
https://github.com/SebFar/radial_bnn/tree/2497e5e009409ac910d609850eae27f7cc74cec2
GC3d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/h2/ch2rs62pxdgoo4g5752v5gxkh5n5qav4turlugzhpme3voclettu.py # Topologically Sorted Source Nodes: [conv3d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv3d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_1, %primals_2, [1, 1, 1], [0, 3, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x1 = (xindex // 64) tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = 
tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/de/cdeccmxor2yx4bbsxp4ljvqppmxmrzo3wctdroe2dulbfwm7374x.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.add] # Source node to ATen node mapping: # x => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_1, %squeeze_3), kwargs = {}) triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 64) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (256, 4, 1, 7, 1), (28, 7, 7, 1, 1)) assert_size_stride(primals_2, (256, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 256, 1, 1, 7), (1792, 7, 7, 7, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (256, 4, 1, 1, 7), (28, 7, 7, 7, 1)) assert_size_stride(primals_7, (256, ), (1, )) assert_size_stride(primals_8, (4, 256, 1, 7, 1), (1792, 7, 7, 1, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv3d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 
1, 1), padding=(0, 3, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 256, 4, 4, 4), (16384, 64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv3d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 16384, grid=grid(16384), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [x_l], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 256, 4, 4, 4), (0, 64, 16, 4, 1), 0), primals_4, stride=(1, 1, 1), padding=(0, 0, 3), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) # Topologically Sorted Source Nodes: [conv3d_2], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_6, stride=(1, 1, 1), padding=(0, 0, 3), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf3, (1, 256, 4, 4, 4), (16384, 64, 16, 4, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [conv3d_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(buf4, primals_7, 16384, grid=grid(16384), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [x_r], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(reinterpret_tensor(buf4, (1, 256, 4, 4, 4), (0, 64, 16, 4, 1), 0), primals_8, stride=(1, 1, 1), padding=(0, 3, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf6 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.add] triton_poi_fused_add_1.run(buf6, primals_5, buf5, primals_9, 256, grid=grid(256), stream=stream0) del buf5 del primals_5 del primals_9 return (buf6, primals_1, primals_4, primals_6, primals_8, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf1, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 4, 1, 7, 1), (28, 7, 7, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 256, 1, 1, 7), (1792, 7, 7, 7, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((256, 4, 1, 1, 7), (28, 7, 7, 7, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 256, 1, 7, 1), (1792, 7, 7, 1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn class GC3d(nn.Module): def __init__(self, inplanes, planes, kh=7, kw=7, mdim=256, which_conv= nn.Conv3d): super(GC3d, self).__init__() self.conv_l1 = which_conv(inplanes, mdim, kernel_size=(1, kh, 1), padding=(0, int(kh / 2), 0)) self.conv_l2 = which_conv(mdim, planes, kernel_size=(1, 1, kw), padding=(0, 0, int(kw / 2))) self.conv_r1 = which_conv(inplanes, mdim, kernel_size=(1, 1, kw), padding=(0, 0, int(kw / 2))) self.conv_r2 = which_conv(mdim, planes, kernel_size=(1, kh, 1), padding=(0, int(kh / 2), 0)) def forward(self, x): x_l = self.conv_l2(self.conv_l1(x)) x_r = self.conv_r2(self.conv_r1(x)) x = x_l + x_r return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'planes': 4}]
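A short shape sketch (our addition): GC3d approximates a large kh x kw context window with two separable branches, (1, kh, 1) then (1, 1, kw) on the left and the transposed order on the right, and sums them; with the default padding the spatial size is preserved.

import torch

# Hypothetical check mirroring get_inputs/get_init_inputs above; it relies
# on nn.Conv3d accepting the unbatched (C, D, H, W) input used by this row.
gc = GC3d(inplanes=4, planes=4)
x = torch.rand(4, 4, 4, 4)
out = gc(x)
assert out.shape == x.shape            # same spatial size, planes == inplanes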
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x1 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (256, 4, 1, 7, 1), (28, 7, 7, 1, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 256, 1, 1, 7), (1792, 7, 7, 7, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (256, 4, 1, 1, 7), (28, 7, 7, 7, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (4, 256, 1, 7, 1), (1792, 7, 7, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(0, 3, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 256, 4, 4, 4), (16384, 64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(16384)](buf1, primals_2, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 256, 4, 4, 4), (0, 64, 16, 4, 1), 0), primals_4, stride=(1, 1, 1), padding=(0, 0, 3), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf3 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_6, stride=(1, 1, 1), padding=(0, 0, 3), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf3, (1, 256, 4, 4, 4), (16384, 64, 16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_0[grid(16384)](buf4, primals_7, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf5 = extern_kernels.convolution(reinterpret_tensor(buf4, (1, 256, 4, 4, 4), (0, 64, 16, 4, 1), 0), primals_8, stride=(1, 1, 1), padding=(0, 3, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf5, (1, 4, 4, 4, 
4), (256, 64, 16, 4, 1)) buf6 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_add_1[grid(256)](buf6, primals_5, buf5, primals_9, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del primals_5 del primals_9 return (buf6, primals_1, primals_4, primals_6, primals_8, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf1, buf4) class GC3dNew(nn.Module): def __init__(self, inplanes, planes, kh=7, kw=7, mdim=256, which_conv= nn.Conv3d): super(GC3dNew, self).__init__() self.conv_l1 = which_conv(inplanes, mdim, kernel_size=(1, kh, 1), padding=(0, int(kh / 2), 0)) self.conv_l2 = which_conv(mdim, planes, kernel_size=(1, 1, kw), padding=(0, 0, int(kw / 2))) self.conv_r1 = which_conv(inplanes, mdim, kernel_size=(1, 1, kw), padding=(0, 0, int(kw / 2))) self.conv_r2 = which_conv(mdim, planes, kernel_size=(1, kh, 1), padding=(0, int(kh / 2), 0)) def forward(self, input_0): primals_1 = self.conv_l1.weight primals_2 = self.conv_l1.bias primals_4 = self.conv_l2.weight primals_5 = self.conv_l2.bias primals_6 = self.conv_r1.weight primals_7 = self.conv_r1.bias primals_8 = self.conv_r2.weight primals_9 = self.conv_r2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
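A hedged equivalence sketch (our addition): GC3dNew routes the same nn.Conv3d parameters through call(), so after copying the state dict the two modules should agree to within convolution floating-point noise.

import torch

# Hypothetical CUDA-only comparison; assumes both classes from this row are
# importable and that the attribute names (conv_l1 .. conv_r2) line up,
# which they do by construction.
if torch.cuda.is_available():
    eager = GC3d(4, 4).cuda()
    compiled = GC3dNew(4, 4).cuda()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(compiled(x), eager(x))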
Schmiddo/d2conv3d
GC3d
false
8,756
[ "MIT" ]
16
9b330be56f0dfb9657a63e3fb3394ab36b35a67b
https://github.com/Schmiddo/d2conv3d/tree/9b330be56f0dfb9657a63e3fb3394ab36b35a67b
DCENetLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/s5/cs5oqnr6tixfkqv6igmjhc37j5qtzj5urknjknrvbetyous3m46e.py # Topologically Sorted Source Nodes: [mse_loss, mse_loss_1, mul_2, pow_1, exp, add, sub, sub_1, sum_1, kl_loss, mul_3, loss], Original ATen: [aten.mse_loss, aten.mul, aten.pow, aten.exp, aten.add, aten.sub, aten.sum] # Source node to ATen node mapping: # add => add # exp => exp # kl_loss => mul # loss => add_1 # mse_loss => mean, pow_2, sub_2 # mse_loss_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # pow_1 => pow_1 # sub => sub # sub_1 => sub_1 # sum_1 => sum_1 # Graph fragment: # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg3_1, %arg2_1), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_2,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 4), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, 4), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg1_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %exp), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %arg1_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, 1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 0.5), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, -3), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) triton_per_fused_add_exp_mse_loss_mul_pow_sub_sum_0 = async_compile.triton('triton_per_fused_add_exp_mse_loss_mul_pow_sub_sum_0', ''' import triton import 
triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_exp_mse_loss_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_exp_mse_loss_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp7 = tl.load(in_ptr2 + (r0), None) tmp9 = tl.load(in_ptr3 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp8 = tmp7 * tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tmp11 - tmp9 tmp13 = 1.0 tmp14 = tmp12 - tmp13 tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0)) tmp18 = 256.0 tmp19 = tmp6 / tmp18 tmp20 = 4.0 tmp21 = tmp19 * tmp20 tmp22 = tmp21 * tmp20 tmp23 = 0.5 tmp24 = tmp17 * tmp23 tmp25 = -3.0 tmp26 = tmp24 * tmp25 tmp27 = tmp22 + tmp26 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp27, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mse_loss, mse_loss_1, mul_2, pow_1, exp, add, sub, sub_1, sum_1, kl_loss, mul_3, loss], Original ATen: [aten.mse_loss, aten.mul, aten.pow, aten.exp, aten.add, aten.sub, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_add_exp_mse_loss_mul_pow_sub_sum_0.run(buf2, arg3_1, arg2_1, arg0_1, arg1_1, 1, 256, grid=grid(1), 
stream=stream0) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.nn.functional as F class DCENetLoss(nn.Module): def __init__(self, config): super(DCENetLoss, self).__init__() self.beta = config['beta'] self.pred_seq = config['pred_seq'] def forward(self, mu, log_var, y_pred, y_true): kl_loss = 0.5 * torch.sum(mu.pow(2) + log_var.exp() - log_var - 1) mse_loss = self.pred_seq * F.mse_loss(y_pred, y_true) loss = mse_loss * self.beta + kl_loss * (1 - self.beta) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(beta=4, pred_seq=4)}]
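A sanity-check sketch (our addition, reusing the config values from get_init_inputs): the loss combines a pred_seq-scaled MSE with the closed-form KL divergence KL(N(mu, exp(log_var)) || N(0, 1)) = 0.5 * sum(mu^2 + exp(log_var) - log_var - 1), weighted by beta and (1 - beta).

import torch
import torch.nn.functional as F

# Hypothetical recomputation of the loss by hand; a plain dict stands in
# for the _mock_config used by the test harness.
config = {'beta': 4, 'pred_seq': 4}
criterion = DCENetLoss(config)
mu, log_var, y_pred, y_true = (torch.rand(4, 4, 4, 4) for _ in range(4))
kl = 0.5 * torch.sum(mu.pow(2) + log_var.exp() - log_var - 1)
mse = config['pred_seq'] * F.mse_loss(y_pred, y_true)
expected = mse * config['beta'] + kl * (1 - config['beta'])
torch.testing.assert_close(criterion(mu, log_var, y_pred, y_true), expected)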
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_exp_mse_loss_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp7 = tl.load(in_ptr2 + r0, None) tmp9 = tl.load(in_ptr3 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp8 = tmp7 * tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tmp11 - tmp9 tmp13 = 1.0 tmp14 = tmp12 - tmp13 tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0)) tmp18 = 256.0 tmp19 = tmp6 / tmp18 tmp20 = 4.0 tmp21 = tmp19 * tmp20 tmp22 = tmp21 * tmp20 tmp23 = 0.5 tmp24 = tmp17 * tmp23 tmp25 = -3.0 tmp26 = tmp24 * tmp25 tmp27 = tmp22 + tmp26 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp27, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_exp_mse_loss_mul_pow_sub_sum_0[grid(1)](buf2, arg3_1, arg2_1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return buf2, class DCENetLossNew(nn.Module): def __init__(self, config): super(DCENetLossNew, self).__init__() self.beta = config['beta'] self.pred_seq = config['pred_seq'] def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
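A hedged, CUDA-only sketch (our addition): the fused kernel bakes in the constants 256.0 (the mean denominator), 4.0 (pred_seq and beta), 0.5, and -3.0 (1 - beta), so agreement with the eager loss is only expected for beta=4, pred_seq=4 and 4x4x4x4 inputs.

import torch

# Hypothetical comparison; assumes DCENetLoss and DCENetLossNew from this
# row are importable and that the argument order (mu, log_var, y_pred,
# y_true) is kept, as in the wrappers above.
if torch.cuda.is_available():
    args = [torch.rand(4, 4, 4, 4, device='cuda') for _ in range(4)]
    eager = DCENetLoss({'beta': 4, 'pred_seq': 4})
    fused = DCENetLossNew({'beta': 4, 'pred_seq': 4})
    torch.testing.assert_close(fused(*args), eager(*args))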
SeongjuLee/DCENet-PyTorch
DCENetLoss
false
8,757
[ "MIT" ]
10
eb477ce06356ae597c162dd3229285400ebf9168
https://github.com/SeongjuLee/DCENet-PyTorch/tree/eb477ce06356ae597c162dd3229285400ebf9168
lrBLock_l2
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/xr/cxru5osgc5mzu3dewheyw5qpt375n33ampiasr2lfzsfgmra52ie.py # Topologically Sorted Source Nodes: [x_down2], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index] # Source node to ATen node mapping: # x_down2 => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add, add_4, add_5, add_6, clamp_max_2, clamp_max_3, clamp_min, clamp_min_2, clamp_min_3, convert_element_type, convert_element_type_1, convert_element_type_3, iota, mul, mul_2, mul_3, mul_4, sub, sub_2, sub_3, sub_4, sub_5, sub_6 # Graph fragment: # %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (2,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 2.0), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 0.5), kwargs = {}) # %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {}) # %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {}) # %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min, torch.int64), kwargs = {}) # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {}) # %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {}) # 
%_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max, %clamp_max_1]), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {}) # %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {}) # %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {}) # %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0.0), kwargs = {}) # %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {}) # %add_6 : [num_users=7] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_4), kwargs = {}) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 2) % 2 x0 = xindex % 2 x2 = (xindex // 4) x3 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = x0 tmp11 = tmp10.to(tl.float32) tmp12 = tmp11 + tmp2 tmp13 = tmp12 * tmp4 tmp14 = tmp13 - tmp2 tmp15 = triton_helpers.maximum(tmp14, tmp7) tmp16 = tmp15.to(tl.int32) tmp17 = tl.load(in_ptr0 + (tmp16 + (4*tmp9) + (16*x2)), xmask, eviction_policy='evict_last') tmp18 = tl.full([1], 1, tl.int64) tmp19 = tmp16 + tmp18 tmp20 = tl.full([1], 3, tl.int64) tmp21 = triton_helpers.minimum(tmp19, tmp20) tmp22 = tl.load(in_ptr0 + (tmp21 + (4*tmp9) + (16*x2)), xmask, eviction_policy='evict_last') tmp23 = tmp22 - tmp17 tmp24 = tmp16.to(tl.float32) tmp25 = tmp15 - tmp24 tmp26 = triton_helpers.maximum(tmp25, tmp7) tmp27 = 1.0 tmp28 = triton_helpers.minimum(tmp26, tmp27) tmp29 = tmp23 * tmp28 tmp30 = tmp17 + tmp29 tmp31 = tmp9 + tmp18 tmp32 = triton_helpers.minimum(tmp31, tmp20) tmp33 = tl.load(in_ptr0 + (tmp21 + (4*tmp32) + (16*x2)), xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr0 + (tmp16 + (4*tmp32) + (16*x2)), xmask, eviction_policy='evict_last') tmp35 = tmp33 - tmp34 tmp36 = tmp35 * tmp28 tmp37 = tmp34 + tmp36 tmp38 = tmp37 - tmp30 tmp39 = tmp9.to(tl.float32) tmp40 = tmp8 - tmp39 tmp41 = triton_helpers.maximum(tmp40, tmp7) tmp42 = triton_helpers.minimum(tmp41, tmp27) tmp43 = tmp38 * tmp42 tmp44 = tmp30 + tmp43 tl.store(in_out_ptr0 + (x3), tmp44, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/st/cstnx2zr5hpy7zneeauuznzugsasea674yukbobigykvbvtbqvp7.py # Topologically Sorted Source Nodes: [x_reup], Original ATen: [aten._to_copy] # Source node to ATen node mapping: # x_reup => convert_element_type_5 # Graph fragment: # %convert_element_type_5 : [num_users=7] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_2, torch.int64), kwargs = {}) triton_poi_fused__to_copy_1 = async_compile.triton('triton_poi_fused__to_copy_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_1(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/nr/cnrjfeo7z3ghtb6mys6jsqqdswfwuhaqzvrbuoj2x63ljtq4j7m5.py # Topologically Sorted Source Nodes: [x_reup], Original ATen: [aten.add, aten.clamp] # Source node to ATen node mapping: # x_reup => add_8, clamp_max_4 # Graph fragment: # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_5, 1), kwargs = {}) # %clamp_max_4 : [num_users=5] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_8, 1), kwargs = {}) triton_poi_fused_add_clamp_2 = async_compile.triton('triton_poi_fused_add_clamp_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_2(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = triton_helpers.minimum(tmp10, tmp9) tl.store(out_ptr0 + (x0), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/3z/c3zvgzgjl5zhwhbhzewychii4zy6oobyli3s2axnz3v6ky2opuew.py # Topologically Sorted Source Nodes: [x_reup], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp] # Source node to ATen node mapping: # x_reup => add_7, clamp_max_6, clamp_min_4, clamp_min_6, 
convert_element_type_4, iota_2, mul_5, sub_7, sub_9 # Graph fragment: # %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_2, torch.float32), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.5), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, 0.5), kwargs = {}) # %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_5, 0.5), kwargs = {}) # %clamp_min_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_7, 0.0), kwargs = {}) # %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_4, %convert_element_type_7), kwargs = {}) # %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_9, 0.0), kwargs = {}) # %clamp_max_6 : [num_users=5] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_6, 1.0), kwargs = {}) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + (x0), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/xl/cxlfa5k326fru6wrjtmwtgcx2lxzwek6zq2tgskpwiw7pzwmwo3e.py # Topologically Sorted Source 
Nodes: [x_reup, Laplace_1], Original ATen: [aten._unsafe_index, aten.sub, aten.mul, aten.add] # Source node to ATen node mapping: # Laplace_1 => sub_14 # x_reup => _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, add_11, add_12, add_13, mul_7, mul_8, mul_9, sub_10, sub_11, sub_13 # Graph fragment: # %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add_6, [None, None, %convert_element_type_5, %convert_element_type_7]), kwargs = {}) # %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add_6, [None, None, %convert_element_type_5, %clamp_max_5]), kwargs = {}) # %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add_6, [None, None, %clamp_max_4, %convert_element_type_7]), kwargs = {}) # %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add_6, [None, None, %clamp_max_4, %clamp_max_5]), kwargs = {}) # %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_5, %_unsafe_index_4), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %clamp_max_6), kwargs = {}) # %add_11 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_4, %mul_7), kwargs = {}) # %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_7, %_unsafe_index_6), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %clamp_max_6), kwargs = {}) # %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_6, %mul_8), kwargs = {}) # %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_12, %add_11), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_13, %clamp_max_7), kwargs = {}) # %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %mul_9), kwargs = {}) # %sub_14 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %add_13), kwargs = {}) triton_poi_fused__unsafe_index_add_mul_sub_4 = async_compile.triton('triton_poi_fused__unsafe_index_add_mul_sub_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: '*i64', 5: '*fp32', 6: '*fp32', 7: '*i64', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_mul_sub_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_add_mul_sub_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 4 x0 = xindex % 4 x2 = (xindex // 16) x4 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr5 + (x4), xmask) tmp20 = tl.load(in_ptr6 + (x1), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr7 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 2, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + (2*tmp4) + (4*x2)), xmask, eviction_policy='evict_last') tmp11 = tmp10 + tmp1 tmp12 = tmp10 < 0 tmp13 = tl.where(tmp12, tmp11, tmp10) tmp14 = tl.load(in_ptr2 + (tmp13 + (2*tmp4) + (4*x2)), xmask, eviction_policy='evict_last') tmp15 = tmp14 - tmp9 tmp17 = tmp15 * tmp16 tmp18 = tmp9 + tmp17 tmp21 = tmp20 + tmp1 tmp22 = tmp20 < 0 tmp23 = tl.where(tmp22, tmp21, tmp20) tmp24 = tl.load(in_ptr2 + (tmp8 + (2*tmp23) + (4*x2)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr2 + (tmp13 + (2*tmp23) + (4*x2)), xmask, eviction_policy='evict_last') tmp26 = tmp25 - tmp24 tmp27 = tmp26 * tmp16 tmp28 = tmp24 + tmp27 tmp29 = tmp28 - tmp18 tmp31 = tmp29 * tmp30 tmp32 = tmp18 + tmp31 tmp33 = tmp19 - tmp32 tl.store(in_out_ptr0 + (x4), tmp33, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ve/cvefilbsmy7xps6zprinqvyq2xe4vq2p2ejasxm7lngek4g6bnpl.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%add_6, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': ['in_out_ptr0'], 
'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/x3/cx3lp3yvilr2ywn2rr5r6ufwkr42whge5qcphlinjvizyhfo5ovr.py # Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten._native_batch_norm_legit] # Source node to ATen node mapping: # instance_norm => add_14, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_4, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_14,), kwargs = {}) triton_poi_fused__native_batch_norm_legit_6 = async_compile.triton('triton_poi_fused__native_batch_norm_legit_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__native_batch_norm_legit_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__native_batch_norm_legit_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/wy/cwyfzzpd43ixa7gxhqwpa4iad5jox65fdhvactlt42kpz6dnuoom.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {}) triton_poi_fused_relu_7 = async_compile.triton('triton_poi_fused_relu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/uz/cuzzarnodm5ok5si3z57x7f2lkjo7fvsccsy52mteybclqptydqd.py # Topologically Sorted Source Nodes: [conv2d_2, instance_norm_2, x_3], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # instance_norm_2 => add_17, rsqrt_2, var_mean_2 # x_3 => relu_2 # Graph fragment: # %convolution_2 : 
[num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%sub_14, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_8, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_17 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {}) # %rsqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_17,), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_9,), kwargs = {}) triton_per_fused__native_batch_norm_legit_convolution_relu_8 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_relu_8(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = tmp2 - tmp12 tmp20 = 16.0 tmp21 = tmp18 / tmp20 tmp22 = 1e-05 tmp23 = tmp21 + tmp22 tmp24 = libdevice.rsqrt(tmp23) tmp25 = tmp19 * tmp24 tmp26 = 
tl.full([1, 1], 0, tl.int32) tmp27 = triton_helpers.maximum(tmp26, tmp25) tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask) tl.store(out_ptr2 + (r2 + (16*x3)), tmp27, xmask) tl.store(out_ptr3 + (x3), tmp24, xmask) tl.store(out_ptr0 + (x3), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/al/caly4e7xuixog2pvmy5o7rfwghhofbbrai65ww7cul3am6qmdqyh.py # Topologically Sorted Source Nodes: [x_1, add, x_2, conv2d_3, x_4, add_1, x_5, interpolate_2, output2], Original ATen: [aten._native_batch_norm_legit, aten.add, aten.relu, aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.threshold_backward] # Source node to ATen node mapping: # add => add_16 # add_1 => add_19 # conv2d_3 => convolution_3 # interpolate_2 => _unsafe_index_10, _unsafe_index_11, _unsafe_index_8, _unsafe_index_9, add_24, add_25, add_26, mul_16, mul_17, mul_18, sub_22, sub_23, sub_25 # output2 => add_27 # x_1 => add_15, rsqrt_1, var_mean_1 # x_2 => relu_1 # x_4 => add_18, rsqrt_3, var_mean_3 # x_5 => relu_3 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_6, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_15,), kwargs = {}) # %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_7, %add_6), kwargs = {}) # %relu_1 : [num_users=5] = call_function[target=torch.ops.aten.relu.default](args = (%add_16,), kwargs = {}) # %convolution_3 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %var_mean_3 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_10, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_18 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {}) # %rsqrt_3 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_18,), kwargs = {}) # %add_19 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %sub_14), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_19,), kwargs = {}) # %_unsafe_index_8 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_1, [None, None, %convert_element_type_5, %convert_element_type_7]), kwargs = {}) # %_unsafe_index_9 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_1, [None, None, %convert_element_type_5, %clamp_max_5]), kwargs = {}) # %_unsafe_index_10 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_1, [None, None, %clamp_max_4, %convert_element_type_7]), kwargs = {}) # %_unsafe_index_11 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_1, [None, None, %clamp_max_4, %clamp_max_5]), kwargs = {}) # %sub_22 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_9, %_unsafe_index_8), kwargs = {}) # %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_22, %clamp_max_6), kwargs = {}) # %add_24 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_8, %mul_16), kwargs 
= {}) # %sub_23 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_11, %_unsafe_index_10), kwargs = {}) # %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_23, %clamp_max_6), kwargs = {}) # %add_25 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_10, %mul_17), kwargs = {}) # %sub_25 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_25, %add_24), kwargs = {}) # %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_25, %clamp_max_7), kwargs = {}) # %add_26 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_24, %mul_18), kwargs = {}) # %add_27 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_26, %relu_3), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_per_fused__native_batch_norm_legit__unsafe_index_add_convolution_mul_relu_sub_threshold_backward_9 = async_compile.triton('triton_per_fused__native_batch_norm_legit__unsafe_index_add_convolution_mul_relu_sub_threshold_backward_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: '*i64', 6: '*fp32', 7: '*i64', 8: '*i64', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*i1', 16: '*fp32', 17: 'i32', 18: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit__unsafe_index_add_convolution_mul_relu_sub_threshold_backward_9', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr2'], 'no_x_dim': False, 'num_load': 13, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit__unsafe_index_add_convolution_mul_relu_sub_threshold_backward_9(in_out_ptr0, in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr0, out_ptr1, out_ptr2, out_ptr4, out_ptr5, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, 
tl.int1) x0 = xindex r3 = rindex x1 = xindex % 4 r5 = (rindex // 4) r4 = rindex % 4 tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_out_ptr0 + (r3 + (16*x0)), xmask, other=0.0) tmp25 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp43 = tl.load(in_ptr2 + (r5), None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr3 + (r4), None, eviction_policy='evict_last') tmp59 = tl.load(in_ptr5 + (r5), None, eviction_policy='evict_last') tmp69 = tl.load(in_ptr6 + (r4), None, eviction_policy='evict_last') tmp80 = tl.load(in_ptr7 + (r4), None, eviction_policy='evict_last') tmp93 = tl.load(in_ptr8 + (r5), None, eviction_policy='evict_last') tmp102 = tl.load(in_ptr9 + (r3 + (16*x0)), xmask, other=0.0) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tmp26 = tmp24 + tmp25 tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tmp29 = tl.where(xmask, tmp27, 0) tmp30 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp32 = tl.where(xmask, tmp30, 0) tmp33 = tl.sum(tmp32, 1)[:, None] tmp34 = tl.full([XBLOCK, 1], 16, tl.int32) tmp35 = tmp34.to(tl.float32) tmp36 = tmp33 / tmp35 tmp37 = tmp27 - tmp36 tmp38 = tmp37 * tmp37 tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp41 = tl.where(xmask, tmp39, 0) tmp42 = tl.sum(tmp41, 1)[:, None] tmp44 = tl.full([XBLOCK, RBLOCK], 2, tl.int32) tmp45 = tmp43 + tmp44 tmp46 = tmp43 < 0 tmp47 = tl.where(tmp46, tmp45, tmp43) tmp49 = tmp48 + tmp44 tmp50 = tmp48 < 0 tmp51 = tl.where(tmp50, tmp49, tmp48) tmp52 = tl.load(in_ptr0 + (tmp51 + (2*tmp47) + (4*x0)), xmask, eviction_policy='evict_last') tmp53 = tmp52 - tmp8 tmp54 = tmp53 * tmp23 tmp55 = tl.load(in_ptr4 + (tmp51 + (2*tmp47) + (4*x0)), xmask, eviction_policy='evict_last') tmp56 = tmp54 + tmp55 tmp57 = tl.full([1, 1], 0, tl.int32) tmp58 = triton_helpers.maximum(tmp57, tmp56) tmp60 = tmp59 + tmp44 tmp61 = tmp59 < 0 tmp62 = tl.where(tmp61, tmp60, tmp59) tmp63 = tl.load(in_ptr0 + (tmp51 + (2*tmp62) + (4*x0)), xmask, eviction_policy='evict_last') tmp64 = tmp63 - tmp8 tmp65 = tmp64 * tmp23 tmp66 = tl.load(in_ptr4 + (tmp51 + (2*tmp62) + (4*x0)), xmask, eviction_policy='evict_last') tmp67 = tmp65 + tmp66 tmp68 = triton_helpers.maximum(tmp57, tmp67) tmp70 = tmp69 + tmp44 tmp71 = tmp69 < 0 tmp72 = tl.where(tmp71, tmp70, tmp69) tmp73 = tl.load(in_ptr0 + (tmp72 + (2*tmp62) + (4*x0)), xmask, eviction_policy='evict_last') tmp74 = tmp73 - tmp8 tmp75 = tmp74 * tmp23 tmp76 = tl.load(in_ptr4 + (tmp72 + (2*tmp62) + (4*x0)), xmask, eviction_policy='evict_last') tmp77 = tmp75 + tmp76 tmp78 = triton_helpers.maximum(tmp57, tmp77) tmp79 = tmp78 - tmp68 tmp81 = tmp79 * tmp80 tmp82 = tmp68 + tmp81 tmp83 = tl.load(in_ptr0 + (tmp72 + (2*tmp47) + (4*x0)), xmask, eviction_policy='evict_last') tmp84 = tmp83 - tmp8 tmp85 = tmp84 * tmp23 tmp86 = tl.load(in_ptr4 + (tmp72 + (2*tmp47) + (4*x0)), xmask, eviction_policy='evict_last') tmp87 = tmp85 + tmp86 tmp88 = triton_helpers.maximum(tmp57, tmp87) tmp89 = tmp88 - tmp58 tmp90 = tmp89 * 
tmp80 tmp91 = tmp58 + tmp90 tmp92 = tmp91 - tmp82 tmp94 = tmp92 * tmp93 tmp95 = tmp82 + tmp94 tmp96 = tmp26 - tmp36 tmp97 = 16.0 tmp98 = tmp42 / tmp97 tmp99 = tmp98 + tmp21 tmp100 = libdevice.rsqrt(tmp99) tmp101 = tmp96 * tmp100 tmp103 = tmp101 + tmp102 tmp104 = triton_helpers.maximum(tmp57, tmp103) tmp105 = tmp95 + tmp104 tmp106 = 0.0 tmp107 = tmp104 <= tmp106 tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) tl.store(in_out_ptr0 + (r3 + (16*x0)), tmp26, xmask) tl.store(in_out_ptr2 + (r3 + (16*x0)), tmp105, xmask) tl.store(out_ptr4 + (r3 + (16*x0)), tmp107, xmask) tl.store(out_ptr5 + (x0), tmp100, xmask) tl.store(out_ptr2 + (x0), tmp36, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/sf/csfbluv5r7zjpd4zho6dpfgujeufxjpp3onbkclbkmauorb2dcxn.py # Topologically Sorted Source Nodes: [add, x_2], Original ATen: [aten.add, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # add => add_16 # x_2 => relu_1 # Graph fragment: # %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_7, %add_6), kwargs = {}) # %relu_1 : [num_users=5] = call_function[target=torch.ops.aten.relu.default](args = (%add_16,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_add_relu_threshold_backward_10 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_relu_threshold_backward_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x2), xmask) tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 + tmp5 tmp7 = tl.full([1], 0, tl.int32) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp9 = 0.0 
tmp10 = tmp8 <= tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf2 = buf1; del buf1 # reuse buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_down2], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index] stream0 = get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(buf3, primals_1, 64, grid=grid(64), stream=stream0) buf4 = empty_strided_cuda((4, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [x_reup], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_1.run(buf4, 4, grid=grid(4), stream=stream0) buf5 = empty_strided_cuda((4, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [x_reup], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_2.run(buf5, 4, grid=grid(4), stream=stream0) buf6 = empty_strided_cuda((4, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_reup], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp] triton_poi_fused__to_copy_1.run(buf6, 4, grid=grid(4), stream=stream0) buf7 = empty_strided_cuda((4, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_reup], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_2.run(buf7, 4, grid=grid(4), stream=stream0) buf8 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [x_reup], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp] triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3.run(buf8, 4, grid=grid(4), stream=stream0) buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_reup], Original ATen: [aten.sub, aten.clamp] triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3.run(buf10, 4, grid=grid(4), stream=stream0) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf11 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [x_reup, Laplace_1], Original ATen: [aten._unsafe_index, aten.sub, aten.mul, aten.add] triton_poi_fused__unsafe_index_add_mul_sub_4.run(buf11, buf4, buf6, buf3, buf7, buf8, primals_1, buf5, buf10, 256, grid=grid(256), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf3, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 2, 2), (16, 4, 2, 1)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] triton_poi_fused_convolution_5.run(buf13, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 buf14 = 
empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf15 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten._native_batch_norm_legit] triton_poi_fused__native_batch_norm_legit_6.run(buf13, buf14, buf15, 16, grid=grid(16), stream=stream0) buf16 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu] triton_poi_fused_relu_7.run(buf13, buf14, buf15, buf16, 64, grid=grid(64), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf17 = extern_kernels.convolution(buf16, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 4, 2, 2), (16, 4, 2, 1)) buf18 = buf17; del buf17 # reuse # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_5.run(buf18, primals_5, 64, grid=grid(64), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf21 = extern_kernels.convolution(buf11, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 4, 4, 4), (64, 16, 4, 1)) buf22 = buf21; del buf21 # reuse buf23 = buf15; del buf15 # reuse buf27 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf26 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [conv2d_2, instance_norm_2, x_3], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.relu] triton_per_fused__native_batch_norm_legit_convolution_relu_8.run(buf22, primals_7, buf23, buf27, buf26, 16, 16, grid=grid(16), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf28 = extern_kernels.convolution(buf27, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 4, 4, 4), (64, 16, 4, 1)) buf19 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf20 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf29 = buf28; del buf28 # reuse buf30 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf35 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf37 = buf35; del buf35 # reuse buf38 = buf37; del buf37 # reuse buf39 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf33 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [x_1, add, x_2, conv2d_3, x_4, add_1, x_5, interpolate_2, output2], Original ATen: [aten._native_batch_norm_legit, aten.add, aten.relu, aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.threshold_backward] triton_per_fused__native_batch_norm_legit__unsafe_index_add_convolution_mul_relu_sub_threshold_backward_9.run(buf29, buf38, buf18, primals_9, buf5, buf6, buf3, buf4, buf7, buf8, buf10, buf11, buf19, buf20, buf30, buf39, buf33, 16, 16, grid=grid(16), stream=stream0) del primals_9 buf40 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool) # Topologically Sorted Source Nodes: [add, x_2], Original ATen: [aten.add, aten.relu, aten.threshold_backward] triton_poi_fused_add_relu_threshold_backward_10.run(buf18, buf19, buf20, buf3, buf40, 64, grid=grid(64), stream=stream0) del 
buf19 del buf20 return (buf38, primals_2, primals_4, primals_6, primals_8, buf3, buf4, buf5, buf6, buf7, buf8, buf10, buf11, buf13, buf16, buf18, buf22, reinterpret_tensor(buf26, (16, ), (1, ), 0), buf27, buf29, reinterpret_tensor(buf33, (16, ), (1, ), 0), buf39, reinterpret_tensor(buf30, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf23, (1, 16, 1, 1), (16, 1, 1, 1), 0), buf40, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
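# --- Illustrative sketch, not part of the generated dump ---
# What the fused resample kernels above implement, restated in eager PyTorch.
# The helper name laplacian_split is an assumption for illustration: the graph
# bilinearly downsamples the input by 2, upsamples it back, and keeps the
# difference as a Laplacian residual, exactly as lrBLock_l2.forward does in
# the source module below.
import torch
import torch.nn.functional as F

def laplacian_split(x):
    # Half-resolution low band plus full-resolution high-frequency residual.
    x_down2 = F.interpolate(x, scale_factor=0.5, mode='bilinear')
    x_reup = F.interpolate(x_down2, scale_factor=2, mode='bilinear')
    return x_down2, x - x_reup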
import math
import torch
import torch.nn as nn
import torch.nn.functional as F


class resBlock(nn.Module):

    def __init__(self, channelDepth, windowSize=3):
        super(resBlock, self).__init__()
        padding = math.floor(windowSize / 2)
        self.conv1 = nn.Conv2d(channelDepth, channelDepth, windowSize, 1, padding)
        self.conv2 = nn.Conv2d(channelDepth, channelDepth, windowSize, 1, padding)
        self.IN_conv = nn.InstanceNorm2d(channelDepth, track_running_stats=False, affine=False)

    def forward(self, x):
        # conv -> IN -> ReLU, conv -> IN, then a residual add with ReLU.
        res = x
        x = F.relu(self.IN_conv(self.conv1(x)))
        x = self.IN_conv(self.conv2(x))
        x = F.relu(x + res)
        return x


class lrBLock_l2(nn.Module):

    def __init__(self, channelDepth, windowSize=3):
        super(lrBLock_l2, self).__init__()
        math.floor(windowSize / 2)  # no-op leftover in the source; result is discarded
        self.res_l2 = resBlock(channelDepth, windowSize)
        self.res_l1 = resBlock(channelDepth, windowSize)

    def forward(self, x):
        # Two-level Laplacian split: a half-resolution low band and the
        # full-resolution high-frequency residual, each refined by a resBlock.
        x_down2 = F.interpolate(x, scale_factor=0.5, mode='bilinear')
        x_reup = F.interpolate(x_down2, scale_factor=2, mode='bilinear')
        Laplace_1 = x - x_reup
        Scale1 = self.res_l1(x_down2)
        Scale2 = self.res_l2(Laplace_1)
        output2 = F.interpolate(Scale1, scale_factor=2, mode='bilinear') + Scale2
        return output2


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'channelDepth': 4}]
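# --- Minimal smoke test, a sketch following the module's own
# get_inputs/get_init_inputs convention (eager path, CPU is fine) ---
init_args, init_kwargs = get_init_inputs()
block = lrBLock_l2(*init_args, **init_kwargs)
out = block(*get_inputs())
assert out.shape == (4, 4, 4, 4)  # same spatial size as the input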
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0( in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 2 % 2 x0 = xindex % 2 x2 = xindex // 4 x3 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 2.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = x0 tmp11 = tmp10.to(tl.float32) tmp12 = tmp11 + tmp2 tmp13 = tmp12 * tmp4 tmp14 = tmp13 - tmp2 tmp15 = triton_helpers.maximum(tmp14, tmp7) tmp16 = tmp15.to(tl.int32) tmp17 = tl.load(in_ptr0 + (tmp16 + 4 * tmp9 + 16 * x2), xmask, eviction_policy='evict_last') tmp18 = tl.full([1], 1, tl.int64) tmp19 = tmp16 + tmp18 tmp20 = tl.full([1], 3, tl.int64) tmp21 = triton_helpers.minimum(tmp19, tmp20) tmp22 = tl.load(in_ptr0 + (tmp21 + 4 * tmp9 + 16 * x2), xmask, eviction_policy='evict_last') tmp23 = tmp22 - tmp17 tmp24 = tmp16.to(tl.float32) tmp25 = tmp15 - tmp24 tmp26 = triton_helpers.maximum(tmp25, tmp7) tmp27 = 1.0 tmp28 = triton_helpers.minimum(tmp26, tmp27) tmp29 = tmp23 * tmp28 tmp30 = tmp17 + tmp29 tmp31 = tmp9 + tmp18 tmp32 = triton_helpers.minimum(tmp31, tmp20) tmp33 = tl.load(in_ptr0 + (tmp21 + 4 * tmp32 + 16 * x2), xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr0 + (tmp16 + 4 * tmp32 + 16 * x2), xmask, eviction_policy='evict_last') tmp35 = tmp33 - tmp34 tmp36 = tmp35 * tmp28 tmp37 = tmp34 + tmp36 tmp38 = tmp37 - tmp30 tmp39 = tmp9.to(tl.float32) tmp40 = tmp8 - tmp39 tmp41 = triton_helpers.maximum(tmp40, tmp7) tmp42 = triton_helpers.minimum(tmp41, tmp27) tmp43 = tmp38 * tmp42 tmp44 = tmp30 + tmp43 tl.store(in_out_ptr0 + x3, tmp44, xmask) @triton.jit def triton_poi_fused__to_copy_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_2(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = triton_helpers.minimum(tmp10, tmp9) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_mul_sub_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr5 + x4, xmask) tmp20 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr7 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 2, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 2 * tmp4 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tmp10 + tmp1 tmp12 = tmp10 < 0 tmp13 = tl.where(tmp12, tmp11, tmp10) tmp14 = tl.load(in_ptr2 + (tmp13 + 2 * tmp4 + 4 * x2), xmask, eviction_policy='evict_last') tmp15 = tmp14 - tmp9 tmp17 = tmp15 * tmp16 tmp18 = tmp9 + tmp17 tmp21 = tmp20 + tmp1 tmp22 = tmp20 < 0 tmp23 = tl.where(tmp22, tmp21, tmp20) tmp24 = tl.load(in_ptr2 + (tmp8 + 2 * tmp23 + 4 * x2), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr2 + (tmp13 + 2 * tmp23 + 4 * x2), xmask, eviction_policy='evict_last') tmp26 = tmp25 - tmp24 tmp27 = tmp26 * tmp16 tmp28 = tmp24 + tmp27 tmp29 = tmp28 - tmp18 tmp31 = tmp29 * tmp30 tmp32 = tmp18 + tmp31 tmp33 = tmp19 - tmp32 tl.store(in_out_ptr0 + x4, tmp33, xmask) @triton.jit def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__native_batch_norm_legit_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + 
x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_relu_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_relu_8(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr ): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = tmp2 - tmp12 tmp20 = 16.0 tmp21 = tmp18 / tmp20 tmp22 = 1e-05 tmp23 = tmp21 + tmp22 tmp24 = libdevice.rsqrt(tmp23) tmp25 = tmp19 * tmp24 tmp26 = tl.full([1, 1], 0, tl.int32) tmp27 = triton_helpers.maximum(tmp26, tmp25) tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask) tl.store(out_ptr2 + (r2 + 16 * x3), tmp27, xmask) tl.store(out_ptr3 + x3, tmp24, xmask) tl.store(out_ptr0 + x3, tmp12, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit__unsafe_index_add_convolution_mul_relu_sub_threshold_backward_9( in_out_ptr0, in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr0, out_ptr1, out_ptr2, out_ptr4, out_ptr5, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 4 r5 = rindex // 4 r4 = rindex % 4 tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_out_ptr0 + (r3 + 16 * x0), xmask, other=0.0) tmp25 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp43 = tl.load(in_ptr2 + r5, None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr3 + r4, None, eviction_policy='evict_last') tmp59 = tl.load(in_ptr5 + r5, None, eviction_policy='evict_last') tmp69 = tl.load(in_ptr6 + r4, None, eviction_policy='evict_last') tmp80 = tl.load(in_ptr7 + r4, None, eviction_policy='evict_last') tmp93 = tl.load(in_ptr8 + r5, None, eviction_policy='evict_last') tmp102 = tl.load(in_ptr9 + (r3 + 16 * x0), xmask, 
other=0.0) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tmp26 = tmp24 + tmp25 tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tl.where(xmask, tmp27, 0) tmp30 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp32 = tl.where(xmask, tmp30, 0) tmp33 = tl.sum(tmp32, 1)[:, None] tmp34 = tl.full([XBLOCK, 1], 16, tl.int32) tmp35 = tmp34.to(tl.float32) tmp36 = tmp33 / tmp35 tmp37 = tmp27 - tmp36 tmp38 = tmp37 * tmp37 tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp41 = tl.where(xmask, tmp39, 0) tmp42 = tl.sum(tmp41, 1)[:, None] tmp44 = tl.full([XBLOCK, RBLOCK], 2, tl.int32) tmp45 = tmp43 + tmp44 tmp46 = tmp43 < 0 tmp47 = tl.where(tmp46, tmp45, tmp43) tmp49 = tmp48 + tmp44 tmp50 = tmp48 < 0 tmp51 = tl.where(tmp50, tmp49, tmp48) tmp52 = tl.load(in_ptr0 + (tmp51 + 2 * tmp47 + 4 * x0), xmask, eviction_policy='evict_last') tmp53 = tmp52 - tmp8 tmp54 = tmp53 * tmp23 tmp55 = tl.load(in_ptr4 + (tmp51 + 2 * tmp47 + 4 * x0), xmask, eviction_policy='evict_last') tmp56 = tmp54 + tmp55 tmp57 = tl.full([1, 1], 0, tl.int32) tmp58 = triton_helpers.maximum(tmp57, tmp56) tmp60 = tmp59 + tmp44 tmp61 = tmp59 < 0 tmp62 = tl.where(tmp61, tmp60, tmp59) tmp63 = tl.load(in_ptr0 + (tmp51 + 2 * tmp62 + 4 * x0), xmask, eviction_policy='evict_last') tmp64 = tmp63 - tmp8 tmp65 = tmp64 * tmp23 tmp66 = tl.load(in_ptr4 + (tmp51 + 2 * tmp62 + 4 * x0), xmask, eviction_policy='evict_last') tmp67 = tmp65 + tmp66 tmp68 = triton_helpers.maximum(tmp57, tmp67) tmp70 = tmp69 + tmp44 tmp71 = tmp69 < 0 tmp72 = tl.where(tmp71, tmp70, tmp69) tmp73 = tl.load(in_ptr0 + (tmp72 + 2 * tmp62 + 4 * x0), xmask, eviction_policy='evict_last') tmp74 = tmp73 - tmp8 tmp75 = tmp74 * tmp23 tmp76 = tl.load(in_ptr4 + (tmp72 + 2 * tmp62 + 4 * x0), xmask, eviction_policy='evict_last') tmp77 = tmp75 + tmp76 tmp78 = triton_helpers.maximum(tmp57, tmp77) tmp79 = tmp78 - tmp68 tmp81 = tmp79 * tmp80 tmp82 = tmp68 + tmp81 tmp83 = tl.load(in_ptr0 + (tmp72 + 2 * tmp47 + 4 * x0), xmask, eviction_policy='evict_last') tmp84 = tmp83 - tmp8 tmp85 = tmp84 * tmp23 tmp86 = tl.load(in_ptr4 + (tmp72 + 2 * tmp47 + 4 * x0), xmask, eviction_policy='evict_last') tmp87 = tmp85 + tmp86 tmp88 = triton_helpers.maximum(tmp57, tmp87) tmp89 = tmp88 - tmp58 tmp90 = tmp89 * tmp80 tmp91 = tmp58 + tmp90 tmp92 = tmp91 - tmp82 tmp94 = tmp92 * tmp93 tmp95 = tmp82 + tmp94 tmp96 = tmp26 - tmp36 tmp97 = 16.0 tmp98 = tmp42 / tmp97 tmp99 = tmp98 + tmp21 tmp100 = libdevice.rsqrt(tmp99) tmp101 = tmp96 * tmp100 tmp103 = tmp101 + tmp102 tmp104 = triton_helpers.maximum(tmp57, tmp103) tmp105 = tmp95 + tmp104 tmp106 = 0.0 tmp107 = tmp104 <= tmp106 tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) tl.store(in_out_ptr0 + (r3 + 16 * x0), tmp26, xmask) tl.store(in_out_ptr2 + (r3 + 16 * x0), tmp105, xmask) tl.store(out_ptr4 + (r3 + 16 * x0), tmp107, xmask) tl.store(out_ptr5 + x0, tmp100, xmask) tl.store(out_ptr2 + x0, tmp36, xmask) @triton.jit def triton_poi_fused_add_relu_threshold_backward_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + 
x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, xmask) tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 + tmp5 tmp7 = tl.full([1], 0, tl.int32) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp9 = 0.0 tmp10 = tmp8 <= tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf2 = buf1 del buf1 buf3 = buf2 del buf2 get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid (64)](buf3, primals_1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_1[grid(4)](buf4, 4, XBLOCK=4, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_2[grid(4)](buf5, 4, XBLOCK=4, num_warps= 1, num_stages=1) buf6 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused__to_copy_1[grid(4)](buf6, 4, XBLOCK=4, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused_add_clamp_2[grid(4)](buf7, 4, XBLOCK=4, num_warps= 1, num_stages=1) buf8 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3[grid(4)](buf8, 4, XBLOCK=4, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3[grid(4)](buf10, 4, XBLOCK=4, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf11 = buf9 del buf9 triton_poi_fused__unsafe_index_add_mul_sub_4[grid(256)](buf11, buf4, buf6, buf3, buf7, buf8, primals_1, buf5, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf12 = extern_kernels.convolution(buf3, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 2, 2), (16, 4, 2, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_5[grid(64)](buf13, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf14 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) buf15 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. 
float32) triton_poi_fused__native_batch_norm_legit_6[grid(16)](buf13, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused_relu_7[grid(64)](buf13, buf14, buf15, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) buf17 = extern_kernels.convolution(buf16, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 4, 2, 2), (16, 4, 2, 1)) buf18 = buf17 del buf17 triton_poi_fused_convolution_5[grid(64)](buf18, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf21 = extern_kernels.convolution(buf11, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 4, 4, 4), (64, 16, 4, 1)) buf22 = buf21 del buf21 buf23 = buf15 del buf15 buf27 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf26 = buf14 del buf14 triton_per_fused__native_batch_norm_legit_convolution_relu_8[grid(16)]( buf22, primals_7, buf23, buf27, buf26, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_7 buf28 = extern_kernels.convolution(buf27, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 4, 4, 4), (64, 16, 4, 1)) buf19 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) buf20 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) buf29 = buf28 del buf28 buf30 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) buf35 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf37 = buf35 del buf35 buf38 = buf37 del buf37 buf39 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf33 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. 
float32) triton_per_fused__native_batch_norm_legit__unsafe_index_add_convolution_mul_relu_sub_threshold_backward_9[ grid(16)](buf29, buf38, buf18, primals_9, buf5, buf6, buf3, buf4, buf7, buf8, buf10, buf11, buf19, buf20, buf30, buf39, buf33, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_9 buf40 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_10[grid(64)](buf18, buf19, buf20, buf3, buf40, 64, XBLOCK=64, num_warps=1, num_stages=1 ) del buf19 del buf20 return (buf38, primals_2, primals_4, primals_6, primals_8, buf3, buf4, buf5, buf6, buf7, buf8, buf10, buf11, buf13, buf16, buf18, buf22, reinterpret_tensor(buf26, (16,), (1,), 0), buf27, buf29, reinterpret_tensor(buf33, (16,), (1,), 0), buf39, reinterpret_tensor(buf30, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf23, (1, 16, 1, 1), (16, 1, 1, 1), 0), buf40) class resBlock(nn.Module): def __init__(self, channelDepth, windowSize=3): super(resBlock, self).__init__() padding = math.floor(windowSize / 2) self.conv1 = nn.Conv2d(channelDepth, channelDepth, windowSize, 1, padding) self.conv2 = nn.Conv2d(channelDepth, channelDepth, windowSize, 1, padding) self.IN_conv = nn.InstanceNorm2d(channelDepth, track_running_stats= False, affine=False) def forward(self, x): res = x x = F.relu(self.IN_conv(self.conv1(x))) x = self.IN_conv(self.conv2(x)) x = F.relu(x + res) return x class lrBLock_l2New(nn.Module): def __init__(self, channelDepth, windowSize=3): super(lrBLock_l2New, self).__init__() math.floor(windowSize / 2) self.res_l2 = resBlock(channelDepth, windowSize) self.res_l1 = resBlock(channelDepth, windowSize) def forward(self, input_0): primals_2 = self.res_l2.conv1.weight primals_3 = self.res_l2.conv1.bias primals_4 = self.res_l2.conv2.weight primals_5 = self.res_l2.conv2.bias primals_6 = self.res_l1.conv1.weight primals_7 = self.res_l1.conv1.bias primals_8 = self.res_l1.conv2.weight primals_9 = self.res_l1.conv2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
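# --- Hedged equivalence check, not part of the dump: assumes a CUDA device
# and that the eager lrBLock_l2 and the Inductor wrapper lrBLock_l2New above
# are importable side by side; the tolerance is illustrative. ---
if torch.cuda.is_available():
    torch.manual_seed(0)
    eager = lrBLock_l2(4).cuda()
    compiled = lrBLock_l2New(4).cuda()
    compiled.load_state_dict(eager.state_dict())  # share identical conv weights
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x), compiled(x), atol=1e-5)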
repo_name: SeokjaeLIM/DSLR-release
module_name: lrBLock_l2
synthetic: false
uuid: 8758
licenses: ["Apache-2.0"]
stars: 14
sha: 861429482faf50ee3d6570948af8c48df1fc7f43
repo_link: https://github.com/SeokjaeLIM/DSLR-release/tree/861429482faf50ee3d6570948af8c48df1fc7f43
entry_point: NetVLAD
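# --- Illustrative sketch, not part of the dump: the NetVLAD record below
# opens with kernels for channel-wise L2 normalization (a pow/sum reduction
# feeding a div) and a long run of per-cluster residual subtractions. A
# hedged eager-mode restatement of those two steps; the shapes and the
# centroids argument are assumptions for illustration. ---
import torch
import torch.nn.functional as F

def normalize_and_residuals(x, centroids):
    # x: (N, D, H, W) local descriptors; centroids: (K, D) cluster centers.
    x = F.normalize(x, p=2, dim=1)  # matches the linalg_vector_norm + div kernels
    flat = x.flatten(2)  # (N, D, H*W)
    # residual_k = descriptor - c_k, broadcast over all spatial positions
    return flat.unsqueeze(1) - centroids[None, :, :, None]  # (N, K, D, H*W)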
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/u3/cu37kuzlgpwonwye6xqtjvxt5kv252foqxbw5zhbogqli35uxeev.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.linalg_vector_norm] # Source node to ATen node mapping: # x => pow_1, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) triton_red_fused_linalg_vector_norm_0 = async_compile.triton('triton_red_fused_linalg_vector_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[16384, 128], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 16384 rnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, 
None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 4096 x1 = (xindex // 4096) _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x3 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (x0 + (4096*r2) + (524288*x1)), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = _tmp3 + tmp2 _tmp3 = tl.where(rmask, tmp4, _tmp3) tmp3 = tl.sum(_tmp3, 1)[:, None] tl.store(out_ptr0 + (x3), tmp3, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/wq/cwq7qfgyxqxicmg3fonn5xsvt5ynmcr7p2tzqkxrtriri6m6lps7.py # Topologically Sorted Source Nodes: [x, residual_2, residual_4, residual_6, residual_8, residual_10, residual_12, residual_14, residual_16, residual_18, residual_20, residual_22, residual_24, residual_26, residual_28, residual_30, residual_32, residual_34, residual_36, residual_38, residual_40, residual_42, residual_44, residual_46, residual_48, residual_50, residual_52, residual_54, residual_56, residual_58, residual_60, residual_62, residual_64, residual_66, residual_68, residual_70, residual_72, residual_74, residual_76, residual_78, residual_80, residual_82, residual_84, residual_86, residual_88, residual_90, residual_92, residual_94, residual_96, residual_98, residual_100, residual_102, residual_104, residual_106, residual_108, residual_110, residual_112, residual_114, residual_116, residual_118, residual_120, residual_122, residual_124, residual_126], Original ATen: [aten.div, aten.sub] # Source node to ATen node mapping: # residual_10 => sub_6 # residual_100 => sub_51 # residual_102 => sub_52 # residual_104 => sub_53 # residual_106 => sub_54 # residual_108 => sub_55 # residual_110 => sub_56 # residual_112 => sub_57 # residual_114 => sub_58 # residual_116 => sub_59 # residual_118 => sub_60 # residual_12 => sub_7 # residual_120 => sub_61 # residual_122 => sub_62 # residual_124 => sub_63 # residual_126 => sub_64 # residual_14 => sub_8 # residual_16 => sub_9 # residual_18 => sub_10 # residual_2 => sub_2 # residual_20 => sub_11 # residual_22 => sub_12 # residual_24 => sub_13 # residual_26 => sub_14 # residual_28 => sub_15 # residual_30 => sub_16 # residual_32 => sub_17 # residual_34 => sub_18 # residual_36 => sub_19 # residual_38 => sub_20 # residual_4 => sub_3 # residual_40 => sub_21 # residual_42 => sub_22 # residual_44 => sub_23 # residual_46 => sub_24 # residual_48 => sub_25 # residual_50 => sub_26 # residual_52 => sub_27 # residual_54 => sub_28 # residual_56 => sub_29 # residual_58 => sub_30 # residual_6 => sub_4 # residual_60 => sub_31 # residual_62 => sub_32 # residual_64 => sub_33 # residual_66 => sub_34 # residual_68 => sub_35 # residual_70 => sub_36 # residual_72 => sub_37 # residual_74 => sub_38 # residual_76 => sub_39 # residual_78 => sub_40 # residual_8 => sub_5 # residual_80 => sub_41 # residual_82 => sub_42 # residual_84 => sub_43 # residual_86 => sub_44 # residual_88 => sub_45 # residual_90 => sub_46 # residual_92 => sub_47 # residual_94 => sub_48 # residual_96 => sub_49 # residual_98 => sub_50 # x => div # Graph fragment: # %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {}) # %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_4), kwargs = {}) # %sub_3 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, 
%unsqueeze_7), kwargs = {}) # %sub_4 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_10), kwargs = {}) # %sub_5 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_13), kwargs = {}) # %sub_6 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_16), kwargs = {}) # %sub_7 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_19), kwargs = {}) # %sub_8 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_22), kwargs = {}) # %sub_9 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_25), kwargs = {}) # %sub_10 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_28), kwargs = {}) # %sub_11 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_31), kwargs = {}) # %sub_12 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_34), kwargs = {}) # %sub_13 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_37), kwargs = {}) # %sub_14 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_40), kwargs = {}) # %sub_15 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_43), kwargs = {}) # %sub_16 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_46), kwargs = {}) # %sub_17 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_49), kwargs = {}) # %sub_18 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_52), kwargs = {}) # %sub_19 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_55), kwargs = {}) # %sub_20 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_58), kwargs = {}) # %sub_21 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_61), kwargs = {}) # %sub_22 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_64), kwargs = {}) # %sub_23 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_67), kwargs = {}) # %sub_24 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_70), kwargs = {}) # %sub_25 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_73), kwargs = {}) # %sub_26 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_76), kwargs = {}) # %sub_27 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_79), kwargs = {}) # %sub_28 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_82), kwargs = {}) # %sub_29 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_85), kwargs = {}) # %sub_30 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_88), kwargs = {}) # %sub_31 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_91), kwargs = {}) # %sub_32 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = 
(%permute, %unsqueeze_94), kwargs = {}) # %sub_33 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_97), kwargs = {}) # %sub_34 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_100), kwargs = {}) # %sub_35 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_103), kwargs = {}) # %sub_36 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_106), kwargs = {}) # %sub_37 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_109), kwargs = {}) # %sub_38 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_112), kwargs = {}) # %sub_39 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_115), kwargs = {}) # %sub_40 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_118), kwargs = {}) # %sub_41 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_121), kwargs = {}) # %sub_42 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_124), kwargs = {}) # %sub_43 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_127), kwargs = {}) # %sub_44 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_130), kwargs = {}) # %sub_45 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_133), kwargs = {}) # %sub_46 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_136), kwargs = {}) # %sub_47 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_139), kwargs = {}) # %sub_48 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_142), kwargs = {}) # %sub_49 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_145), kwargs = {}) # %sub_50 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_148), kwargs = {}) # %sub_51 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_151), kwargs = {}) # %sub_52 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_154), kwargs = {}) # %sub_53 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_157), kwargs = {}) # %sub_54 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_160), kwargs = {}) # %sub_55 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_163), kwargs = {}) # %sub_56 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_166), kwargs = {}) # %sub_57 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_169), kwargs = {}) # %sub_58 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_172), kwargs = {}) # %sub_59 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_175), kwargs = {}) # %sub_60 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_178), kwargs = {}) # %sub_61 : [num_users=2] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_181), kwargs = {}) # %sub_62 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_184), kwargs = {}) # %sub_63 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_187), kwargs = {}) # %sub_64 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_190), kwargs = {}) triton_poi_fused_div_sub_1 = async_compile.triton('triton_poi_fused_div_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: '*fp32', 34: '*fp32', 35: '*fp32', 36: '*fp32', 37: '*fp32', 38: '*fp32', 39: '*fp32', 40: '*fp32', 41: '*fp32', 42: '*fp32', 43: '*fp32', 44: '*fp32', 45: '*fp32', 46: '*fp32', 47: '*fp32', 48: '*fp32', 49: '*fp32', 50: '*fp32', 51: '*fp32', 52: '*fp32', 53: '*fp32', 54: '*fp32', 55: '*fp32', 56: '*fp32', 57: '*fp32', 58: '*fp32', 59: '*fp32', 60: '*fp32', 61: '*fp32', 62: '*fp32', 63: '*fp32', 64: '*fp32', 65: '*fp32', 66: '*fp32', 67: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 65, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, out_ptr28, out_ptr29, out_ptr30, out_ptr31, out_ptr32, out_ptr33, out_ptr34, out_ptr35, out_ptr36, out_ptr37, out_ptr38, out_ptr39, out_ptr40, out_ptr41, 
out_ptr42, out_ptr43, out_ptr44, out_ptr45, out_ptr46, out_ptr47, out_ptr48, out_ptr49, out_ptr50, out_ptr51, out_ptr52, out_ptr53, out_ptr54, out_ptr55, out_ptr56, out_ptr57, out_ptr58, out_ptr59, out_ptr60, out_ptr61, out_ptr62, out_ptr63, xnumel, XBLOCK : tl.constexpr): xnumel = 2097152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = (xindex // 524288) x1 = (xindex // 4096) % 128 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x0 + (4096*x2)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (128 + x1), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (256 + x1), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (384 + x1), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (512 + x1), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + (640 + x1), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + (768 + x1), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr2 + (896 + x1), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (1024 + x1), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (1152 + x1), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (1280 + x1), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (1408 + x1), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + (1536 + x1), None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (1664 + x1), None, eviction_policy='evict_last') tmp32 = tl.load(in_ptr2 + (1792 + x1), None, eviction_policy='evict_last') tmp34 = tl.load(in_ptr2 + (1920 + x1), None, eviction_policy='evict_last') tmp36 = tl.load(in_ptr2 + (2048 + x1), None, eviction_policy='evict_last') tmp38 = tl.load(in_ptr2 + (2176 + x1), None, eviction_policy='evict_last') tmp40 = tl.load(in_ptr2 + (2304 + x1), None, eviction_policy='evict_last') tmp42 = tl.load(in_ptr2 + (2432 + x1), None, eviction_policy='evict_last') tmp44 = tl.load(in_ptr2 + (2560 + x1), None, eviction_policy='evict_last') tmp46 = tl.load(in_ptr2 + (2688 + x1), None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr2 + (2816 + x1), None, eviction_policy='evict_last') tmp50 = tl.load(in_ptr2 + (2944 + x1), None, eviction_policy='evict_last') tmp52 = tl.load(in_ptr2 + (3072 + x1), None, eviction_policy='evict_last') tmp54 = tl.load(in_ptr2 + (3200 + x1), None, eviction_policy='evict_last') tmp56 = tl.load(in_ptr2 + (3328 + x1), None, eviction_policy='evict_last') tmp58 = tl.load(in_ptr2 + (3456 + x1), None, eviction_policy='evict_last') tmp60 = tl.load(in_ptr2 + (3584 + x1), None, eviction_policy='evict_last') tmp62 = tl.load(in_ptr2 + (3712 + x1), None, eviction_policy='evict_last') tmp64 = tl.load(in_ptr2 + (3840 + x1), None, eviction_policy='evict_last') tmp66 = tl.load(in_ptr2 + (3968 + x1), None, eviction_policy='evict_last') tmp68 = tl.load(in_ptr2 + (4096 + x1), None, eviction_policy='evict_last') tmp70 = tl.load(in_ptr2 + (4224 + x1), None, eviction_policy='evict_last') tmp72 = tl.load(in_ptr2 + (4352 + x1), None, eviction_policy='evict_last') tmp74 = tl.load(in_ptr2 + (4480 + x1), None, eviction_policy='evict_last') tmp76 = tl.load(in_ptr2 + (4608 + x1), None, eviction_policy='evict_last') tmp78 = tl.load(in_ptr2 + (4736 + x1), None, eviction_policy='evict_last') tmp80 = tl.load(in_ptr2 + (4864 + x1), None, eviction_policy='evict_last') tmp82 = tl.load(in_ptr2 + (4992 + x1), None, eviction_policy='evict_last') tmp84 = tl.load(in_ptr2 + 
(5120 + x1), None, eviction_policy='evict_last') tmp86 = tl.load(in_ptr2 + (5248 + x1), None, eviction_policy='evict_last') tmp88 = tl.load(in_ptr2 + (5376 + x1), None, eviction_policy='evict_last') tmp90 = tl.load(in_ptr2 + (5504 + x1), None, eviction_policy='evict_last') tmp92 = tl.load(in_ptr2 + (5632 + x1), None, eviction_policy='evict_last') tmp94 = tl.load(in_ptr2 + (5760 + x1), None, eviction_policy='evict_last') tmp96 = tl.load(in_ptr2 + (5888 + x1), None, eviction_policy='evict_last') tmp98 = tl.load(in_ptr2 + (6016 + x1), None, eviction_policy='evict_last') tmp100 = tl.load(in_ptr2 + (6144 + x1), None, eviction_policy='evict_last') tmp102 = tl.load(in_ptr2 + (6272 + x1), None, eviction_policy='evict_last') tmp104 = tl.load(in_ptr2 + (6400 + x1), None, eviction_policy='evict_last') tmp106 = tl.load(in_ptr2 + (6528 + x1), None, eviction_policy='evict_last') tmp108 = tl.load(in_ptr2 + (6656 + x1), None, eviction_policy='evict_last') tmp110 = tl.load(in_ptr2 + (6784 + x1), None, eviction_policy='evict_last') tmp112 = tl.load(in_ptr2 + (6912 + x1), None, eviction_policy='evict_last') tmp114 = tl.load(in_ptr2 + (7040 + x1), None, eviction_policy='evict_last') tmp116 = tl.load(in_ptr2 + (7168 + x1), None, eviction_policy='evict_last') tmp118 = tl.load(in_ptr2 + (7296 + x1), None, eviction_policy='evict_last') tmp120 = tl.load(in_ptr2 + (7424 + x1), None, eviction_policy='evict_last') tmp122 = tl.load(in_ptr2 + (7552 + x1), None, eviction_policy='evict_last') tmp124 = tl.load(in_ptr2 + (7680 + x1), None, eviction_policy='evict_last') tmp126 = tl.load(in_ptr2 + (7808 + x1), None, eviction_policy='evict_last') tmp128 = tl.load(in_ptr2 + (7936 + x1), None, eviction_policy='evict_last') tmp130 = tl.load(in_ptr2 + (8064 + x1), None, eviction_policy='evict_last') tmp2 = libdevice.sqrt(tmp1) tmp3 = 1e-12 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp0 / tmp4 tmp7 = tmp5 - tmp6 tmp9 = tmp5 - tmp8 tmp11 = tmp5 - tmp10 tmp13 = tmp5 - tmp12 tmp15 = tmp5 - tmp14 tmp17 = tmp5 - tmp16 tmp19 = tmp5 - tmp18 tmp21 = tmp5 - tmp20 tmp23 = tmp5 - tmp22 tmp25 = tmp5 - tmp24 tmp27 = tmp5 - tmp26 tmp29 = tmp5 - tmp28 tmp31 = tmp5 - tmp30 tmp33 = tmp5 - tmp32 tmp35 = tmp5 - tmp34 tmp37 = tmp5 - tmp36 tmp39 = tmp5 - tmp38 tmp41 = tmp5 - tmp40 tmp43 = tmp5 - tmp42 tmp45 = tmp5 - tmp44 tmp47 = tmp5 - tmp46 tmp49 = tmp5 - tmp48 tmp51 = tmp5 - tmp50 tmp53 = tmp5 - tmp52 tmp55 = tmp5 - tmp54 tmp57 = tmp5 - tmp56 tmp59 = tmp5 - tmp58 tmp61 = tmp5 - tmp60 tmp63 = tmp5 - tmp62 tmp65 = tmp5 - tmp64 tmp67 = tmp5 - tmp66 tmp69 = tmp5 - tmp68 tmp71 = tmp5 - tmp70 tmp73 = tmp5 - tmp72 tmp75 = tmp5 - tmp74 tmp77 = tmp5 - tmp76 tmp79 = tmp5 - tmp78 tmp81 = tmp5 - tmp80 tmp83 = tmp5 - tmp82 tmp85 = tmp5 - tmp84 tmp87 = tmp5 - tmp86 tmp89 = tmp5 - tmp88 tmp91 = tmp5 - tmp90 tmp93 = tmp5 - tmp92 tmp95 = tmp5 - tmp94 tmp97 = tmp5 - tmp96 tmp99 = tmp5 - tmp98 tmp101 = tmp5 - tmp100 tmp103 = tmp5 - tmp102 tmp105 = tmp5 - tmp104 tmp107 = tmp5 - tmp106 tmp109 = tmp5 - tmp108 tmp111 = tmp5 - tmp110 tmp113 = tmp5 - tmp112 tmp115 = tmp5 - tmp114 tmp117 = tmp5 - tmp116 tmp119 = tmp5 - tmp118 tmp121 = tmp5 - tmp120 tmp123 = tmp5 - tmp122 tmp125 = tmp5 - tmp124 tmp127 = tmp5 - tmp126 tmp129 = tmp5 - tmp128 tmp131 = tmp5 - tmp130 tl.store(out_ptr0 + (x3), tmp5, None) tl.store(out_ptr1 + (x3), tmp7, None) tl.store(out_ptr2 + (x3), tmp9, None) tl.store(out_ptr3 + (x3), tmp11, None) tl.store(out_ptr4 + (x3), tmp13, None) tl.store(out_ptr5 + (x3), tmp15, None) tl.store(out_ptr6 + (x3), tmp17, None) tl.store(out_ptr7 + (x3), tmp19, None) 
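    # Editorial note (hand-added; not Inductor output): tmp5 above is the input
    # L2-normalized over its 128 channels, i.e. tmp0 divided by the square root
    # of the precomputed squared norm (in_ptr1), clamped at 1e-12, and
    # tmp7..tmp131 subtract centroid rows 1..63 of in_ptr2 from it.  The stores
    # below continue the one-buffer-per-centroid layout (out_ptr8..out_ptr63)
    # that mirrors the `sub_*` nodes in the graph fragment.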
    tl.store(out_ptr8 + (x3), tmp21, None)
    tl.store(out_ptr9 + (x3), tmp23, None)
    tl.store(out_ptr10 + (x3), tmp25, None)
    tl.store(out_ptr11 + (x3), tmp27, None)
    tl.store(out_ptr12 + (x3), tmp29, None)
    tl.store(out_ptr13 + (x3), tmp31, None)
    tl.store(out_ptr14 + (x3), tmp33, None)
    tl.store(out_ptr15 + (x3), tmp35, None)
    tl.store(out_ptr16 + (x3), tmp37, None)
    tl.store(out_ptr17 + (x3), tmp39, None)
    tl.store(out_ptr18 + (x3), tmp41, None)
    tl.store(out_ptr19 + (x3), tmp43, None)
    tl.store(out_ptr20 + (x3), tmp45, None)
    tl.store(out_ptr21 + (x3), tmp47, None)
    tl.store(out_ptr22 + (x3), tmp49, None)
    tl.store(out_ptr23 + (x3), tmp51, None)
    tl.store(out_ptr24 + (x3), tmp53, None)
    tl.store(out_ptr25 + (x3), tmp55, None)
    tl.store(out_ptr26 + (x3), tmp57, None)
    tl.store(out_ptr27 + (x3), tmp59, None)
    tl.store(out_ptr28 + (x3), tmp61, None)
    tl.store(out_ptr29 + (x3), tmp63, None)
    tl.store(out_ptr30 + (x3), tmp65, None)
    tl.store(out_ptr31 + (x3), tmp67, None)
    tl.store(out_ptr32 + (x3), tmp69, None)
    tl.store(out_ptr33 + (x3), tmp71, None)
    tl.store(out_ptr34 + (x3), tmp73, None)
    tl.store(out_ptr35 + (x3), tmp75, None)
    tl.store(out_ptr36 + (x3), tmp77, None)
    tl.store(out_ptr37 + (x3), tmp79, None)
    tl.store(out_ptr38 + (x3), tmp81, None)
    tl.store(out_ptr39 + (x3), tmp83, None)
    tl.store(out_ptr40 + (x3), tmp85, None)
    tl.store(out_ptr41 + (x3), tmp87, None)
    tl.store(out_ptr42 + (x3), tmp89, None)
    tl.store(out_ptr43 + (x3), tmp91, None)
    tl.store(out_ptr44 + (x3), tmp93, None)
    tl.store(out_ptr45 + (x3), tmp95, None)
    tl.store(out_ptr46 + (x3), tmp97, None)
    tl.store(out_ptr47 + (x3), tmp99, None)
    tl.store(out_ptr48 + (x3), tmp101, None)
    tl.store(out_ptr49 + (x3), tmp103, None)
    tl.store(out_ptr50 + (x3), tmp105, None)
    tl.store(out_ptr51 + (x3), tmp107, None)
    tl.store(out_ptr52 + (x3), tmp109, None)
    tl.store(out_ptr53 + (x3), tmp111, None)
    tl.store(out_ptr54 + (x3), tmp113, None)
    tl.store(out_ptr55 + (x3), tmp115, None)
    tl.store(out_ptr56 + (x3), tmp117, None)
    tl.store(out_ptr57 + (x3), tmp119, None)
    tl.store(out_ptr58 + (x3), tmp121, None)
    tl.store(out_ptr59 + (x3), tmp123, None)
    tl.store(out_ptr60 + (x3), tmp125, None)
    tl.store(out_ptr61 + (x3), tmp127, None)
    tl.store(out_ptr62 + (x3), tmp129, None)
    tl.store(out_ptr63 + (x3), tmp131, None)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/mz/cmz3teqnkce5uneqwkd4urp7bav43oqn2ansodmxcehwjmm5hwa4.py
# Topologically Sorted Source Nodes: [hard_assign, soft_assign_1], Original ATen: [aten.argmax, aten._softmax]
# Source node to ATen node mapping:
# hard_assign => argmax
# soft_assign_1 => amax, exp, sub, sum_2
# Graph fragment:
# %argmax : [num_users=1] = call_function[target=torch.ops.aten.argmax.default](args = (%view, 1), kwargs = {})
# %amax : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_per_fused__softmax_argmax_2 = async_compile.triton('triton_per_fused__softmax_argmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[16384, 64],
    reduction_hint=ReductionHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_argmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 3, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_argmax_2(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 16384
    rnumel = 64
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x0 = xindex % 4096
    x1 = (xindex // 4096)
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (4096*r2) + (262144*x1)), None)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.broadcast_to(rindex, tmp1.shape)
    _, tmp2_tmp = triton_helpers.max_with_index(tmp1, tmp3, 1)
    tmp2 = tmp2_tmp[:, None]
    tmp5 = triton_helpers.max2(tmp1, 1)[:, None]
    tmp6 = tmp0 - tmp5
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
    tmp10 = tl.sum(tmp8, 1)[:, None]
    tl.store(out_ptr0 + (x3), tmp2, None)
    tl.store(out_ptr1 + (x3), tmp5, None)
    tl.store(out_ptr2 + (x3), tmp10, None)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/hv/chvod4i5xws3vo3y5v4kvbqrbhpe24n7uq3izupyixha3jxeonu3.py
# Topologically Sorted Source Nodes: [residual, residual_1, sum_1, residual_3, sum_2, residual_5, sum_3, residual_7, sum_4, residual_9, sum_5, residual_11, sum_6, residual_13, sum_7, residual_15, sum_8, residual_17, sum_9, residual_19, sum_10, residual_21, sum_11, residual_23, sum_12, residual_25, sum_13, residual_27, sum_14, residual_29, sum_15, residual_31, sum_16, residual_33, sum_17, residual_35, sum_18, residual_37, sum_19, residual_39, sum_20, residual_41, sum_21, residual_43, sum_22, residual_45, sum_23, residual_47, sum_24, residual_49, sum_25, residual_51, sum_26, residual_53, sum_27, residual_55, sum_28, residual_57, sum_29], Original ATen: [aten.sub, aten.mul, aten.sum]
# Source node to ATen node mapping:
# residual => sub_1
# residual_1 => mul
# residual_11 => mul_5
# residual_13 => mul_6
# residual_15 => mul_7
# residual_17 => mul_8
# residual_19 => mul_9
# residual_21 => mul_10
# residual_23 => mul_11
# residual_25 => mul_12
# residual_27 => mul_13
# residual_29 => mul_14
# residual_3 => mul_1
# residual_31 => mul_15
# residual_33 => mul_16
# residual_35 => mul_17
# residual_37 => mul_18
# residual_39 => mul_19
# residual_41 => mul_20
# residual_43 => mul_21
# residual_45 => mul_22
# residual_47 => mul_23
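# Editorial sketch (hand-added; not Inductor output, and never called by the
# generated code): in eager PyTorch terms, triton_per_fused__softmax_argmax_2
# above computes, per spatial location, the argmax over the K=64 cluster
# scores together with the row-max and exp-sum statistics of a numerically
# stable softmax; the later reduction kernels finish the exp/division. The
# helper name and the `scores` layout (N, K, L) are illustrative assumptions.
def _sketch_soft_hard_assign(scores):
    hard_assign = scores.argmax(dim=1)                 # the aten.argmax node
    amax = scores.amax(dim=1, keepdim=True)            # the amax node
    exp = (scores - amax).exp()                        # the sub and exp nodes
    soft_assign = exp / exp.sum(dim=1, keepdim=True)   # sum_2, then the division done downstream
    return hard_assign, soft_assign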
# residual_49 => mul_24 # residual_5 => mul_2 # residual_51 => mul_25 # residual_53 => mul_26 # residual_55 => mul_27 # residual_57 => mul_28 # residual_7 => mul_3 # residual_9 => mul_4 # sum_1 => sum_3 # sum_10 => sum_12 # sum_11 => sum_13 # sum_12 => sum_14 # sum_13 => sum_15 # sum_14 => sum_16 # sum_15 => sum_17 # sum_16 => sum_18 # sum_17 => sum_19 # sum_18 => sum_20 # sum_19 => sum_21 # sum_2 => sum_4 # sum_20 => sum_22 # sum_21 => sum_23 # sum_22 => sum_24 # sum_23 => sum_25 # sum_24 => sum_26 # sum_25 => sum_27 # sum_26 => sum_28 # sum_27 => sum_29 # sum_28 => sum_30 # sum_29 => sum_31 # sum_3 => sum_5 # sum_4 => sum_6 # sum_5 => sum_7 # sum_6 => sum_8 # sum_7 => sum_9 # sum_8 => sum_10 # sum_9 => sum_11 # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %unsqueeze_2), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %unsqueeze_5), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [-1]), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %unsqueeze_8), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [-1]), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %unsqueeze_11), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [-1]), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, %unsqueeze_14), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [-1]), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %unsqueeze_17), kwargs = {}) # %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_5, [-1]), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, %unsqueeze_20), kwargs = {}) # %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_6, [-1]), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_8, %unsqueeze_23), kwargs = {}) # %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_7, [-1]), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, %unsqueeze_26), kwargs = {}) # %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_8, [-1]), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %unsqueeze_29), kwargs = {}) # %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_9, [-1]), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %unsqueeze_32), kwargs = {}) # %sum_13 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_10, [-1]), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_12, 
%unsqueeze_35), kwargs = {}) # %sum_14 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_11, [-1]), kwargs = {}) # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_13, %unsqueeze_38), kwargs = {}) # %sum_15 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_12, [-1]), kwargs = {}) # %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %unsqueeze_41), kwargs = {}) # %sum_16 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_13, [-1]), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_15, %unsqueeze_44), kwargs = {}) # %sum_17 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_14, [-1]), kwargs = {}) # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_16, %unsqueeze_47), kwargs = {}) # %sum_18 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_15, [-1]), kwargs = {}) # %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, %unsqueeze_50), kwargs = {}) # %sum_19 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_16, [-1]), kwargs = {}) # %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_18, %unsqueeze_53), kwargs = {}) # %sum_20 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_17, [-1]), kwargs = {}) # %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_19, %unsqueeze_56), kwargs = {}) # %sum_21 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_18, [-1]), kwargs = {}) # %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_20, %unsqueeze_59), kwargs = {}) # %sum_22 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_19, [-1]), kwargs = {}) # %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_21, %unsqueeze_62), kwargs = {}) # %sum_23 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_20, [-1]), kwargs = {}) # %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_22, %unsqueeze_65), kwargs = {}) # %sum_24 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_21, [-1]), kwargs = {}) # %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_23, %unsqueeze_68), kwargs = {}) # %sum_25 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_22, [-1]), kwargs = {}) # %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_24, %unsqueeze_71), kwargs = {}) # %sum_26 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_23, [-1]), kwargs = {}) # %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_25, %unsqueeze_74), kwargs = {}) # %sum_27 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_24, [-1]), kwargs = {}) # %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_26, %unsqueeze_77), kwargs = {}) # %sum_28 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_25, [-1]), kwargs = {}) # %mul_26 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_27, %unsqueeze_80), kwargs = {}) # %sum_29 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_26, [-1]), kwargs = {}) # %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_28, %unsqueeze_83), kwargs = {}) # %sum_30 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_27, [-1]), kwargs = {}) # %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_29, %unsqueeze_86), kwargs = {}) # %sum_31 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_28, [-1]), kwargs = {}) triton_red_fused_mul_sub_sum_3 = async_compile.triton('triton_red_fused_mul_sub_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[512, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: '*fp32', 34: '*fp32', 35: '*fp32', 36: '*fp32', 37: '*fp32', 38: '*fp32', 39: '*fp32', 40: '*fp32', 41: '*fp32', 42: '*fp32', 43: '*fp32', 44: '*fp32', 45: '*fp32', 46: '*fp32', 47: '*fp32', 48: '*fp32', 49: '*fp32', 50: '*fp32', 51: '*fp32', 52: '*fp32', 53: '*fp32', 54: '*fp32', 55: '*fp32', 56: '*fp32', 57: '*fp32', 58: '*fp32', 59: '*fp32', 60: '*fp32', 61: '*fp32', 62: 'i32', 63: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mul_sub_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 61, 'num_reduction': 29, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, 
in_ptr30, in_ptr31, in_ptr32, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, out_ptr28, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 512 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x0 = xindex % 128 tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') x1 = (xindex // 128) _tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp20 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp29 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp38 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp47 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp56 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp65 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp74 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp83 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp92 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp101 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp110 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp119 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp128 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp137 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp146 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp155 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp164 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp173 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp182 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp191 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp200 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp209 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp218 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp227 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp236 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp245 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp254 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp263 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp3 = tl.load(in_ptr2 + (r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr3 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tl.load(in_ptr4 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl.load(in_ptr5 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp14 = tl.load(in_ptr2 + (4096 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tl.load(in_ptr6 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp23 = tl.load(in_ptr2 + (8192 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp31 = tl.load(in_ptr7 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp32 = tl.load(in_ptr2 + (12288 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp40 = tl.load(in_ptr8 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp41 = tl.load(in_ptr2 + (16384 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp49 = tl.load(in_ptr9 + (r2 + (4096*x3)), 
rmask & xmask, eviction_policy='evict_first', other=0.0) tmp50 = tl.load(in_ptr2 + (20480 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp58 = tl.load(in_ptr10 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp59 = tl.load(in_ptr2 + (24576 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp67 = tl.load(in_ptr11 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp68 = tl.load(in_ptr2 + (28672 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp76 = tl.load(in_ptr12 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp77 = tl.load(in_ptr2 + (32768 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp85 = tl.load(in_ptr13 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp86 = tl.load(in_ptr2 + (36864 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp94 = tl.load(in_ptr14 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp95 = tl.load(in_ptr2 + (40960 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp103 = tl.load(in_ptr15 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp104 = tl.load(in_ptr2 + (45056 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp112 = tl.load(in_ptr16 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp113 = tl.load(in_ptr2 + (49152 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp121 = tl.load(in_ptr17 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp122 = tl.load(in_ptr2 + (53248 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp130 = tl.load(in_ptr18 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp131 = tl.load(in_ptr2 + (57344 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp139 = tl.load(in_ptr19 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp140 = tl.load(in_ptr2 + (61440 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp148 = tl.load(in_ptr20 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp149 = tl.load(in_ptr2 + (65536 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp157 = tl.load(in_ptr21 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp158 = tl.load(in_ptr2 + (69632 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp166 = tl.load(in_ptr22 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp167 = tl.load(in_ptr2 + (73728 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp175 = tl.load(in_ptr23 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp176 = tl.load(in_ptr2 + (77824 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp184 = tl.load(in_ptr24 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp185 = tl.load(in_ptr2 + (81920 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp193 = tl.load(in_ptr25 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp194 = tl.load(in_ptr2 + (86016 + r2 + (262144*x1)), rmask & xmask, 
eviction_policy='evict_last', other=0.0) tmp202 = tl.load(in_ptr26 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp203 = tl.load(in_ptr2 + (90112 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp211 = tl.load(in_ptr27 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp212 = tl.load(in_ptr2 + (94208 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp220 = tl.load(in_ptr28 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp221 = tl.load(in_ptr2 + (98304 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp229 = tl.load(in_ptr29 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp230 = tl.load(in_ptr2 + (102400 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp238 = tl.load(in_ptr30 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp239 = tl.load(in_ptr2 + (106496 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp247 = tl.load(in_ptr31 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp248 = tl.load(in_ptr2 + (110592 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp256 = tl.load(in_ptr32 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp257 = tl.load(in_ptr2 + (114688 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp2 * tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = _tmp11 + tmp10 _tmp11 = tl.where(rmask & xmask, tmp12, _tmp11) tmp15 = tmp14 - tmp4 tmp16 = tl_math.exp(tmp15) tmp17 = tmp16 / tmp7 tmp18 = tmp13 * tmp17 tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp21 = _tmp20 + tmp19 _tmp20 = tl.where(rmask & xmask, tmp21, _tmp20) tmp24 = tmp23 - tmp4 tmp25 = tl_math.exp(tmp24) tmp26 = tmp25 / tmp7 tmp27 = tmp22 * tmp26 tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = _tmp29 + tmp28 _tmp29 = tl.where(rmask & xmask, tmp30, _tmp29) tmp33 = tmp32 - tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp34 / tmp7 tmp36 = tmp31 * tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = _tmp38 + tmp37 _tmp38 = tl.where(rmask & xmask, tmp39, _tmp38) tmp42 = tmp41 - tmp4 tmp43 = tl_math.exp(tmp42) tmp44 = tmp43 / tmp7 tmp45 = tmp40 * tmp44 tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK]) tmp48 = _tmp47 + tmp46 _tmp47 = tl.where(rmask & xmask, tmp48, _tmp47) tmp51 = tmp50 - tmp4 tmp52 = tl_math.exp(tmp51) tmp53 = tmp52 / tmp7 tmp54 = tmp49 * tmp53 tmp55 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK]) tmp57 = _tmp56 + tmp55 _tmp56 = tl.where(rmask & xmask, tmp57, _tmp56) tmp60 = tmp59 - tmp4 tmp61 = tl_math.exp(tmp60) tmp62 = tmp61 / tmp7 tmp63 = tmp58 * tmp62 tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK]) tmp66 = _tmp65 + tmp64 _tmp65 = tl.where(rmask & xmask, tmp66, _tmp65) tmp69 = tmp68 - tmp4 tmp70 = tl_math.exp(tmp69) tmp71 = tmp70 / tmp7 tmp72 = tmp67 * tmp71 tmp73 = tl.broadcast_to(tmp72, [XBLOCK, RBLOCK]) tmp75 = _tmp74 + tmp73 _tmp74 = tl.where(rmask & xmask, tmp75, _tmp74) tmp78 = tmp77 - tmp4 tmp79 = tl_math.exp(tmp78) tmp80 = tmp79 / tmp7 tmp81 = tmp76 * tmp80 tmp82 = tl.broadcast_to(tmp81, [XBLOCK, RBLOCK]) tmp84 = _tmp83 + tmp82 _tmp83 = tl.where(rmask & xmask, tmp84, _tmp83) tmp87 = tmp86 - tmp4 tmp88 = tl_math.exp(tmp87) tmp89 = tmp88 / tmp7 tmp90 = tmp85 * tmp89 tmp91 = 
tl.broadcast_to(tmp90, [XBLOCK, RBLOCK]) tmp93 = _tmp92 + tmp91 _tmp92 = tl.where(rmask & xmask, tmp93, _tmp92) tmp96 = tmp95 - tmp4 tmp97 = tl_math.exp(tmp96) tmp98 = tmp97 / tmp7 tmp99 = tmp94 * tmp98 tmp100 = tl.broadcast_to(tmp99, [XBLOCK, RBLOCK]) tmp102 = _tmp101 + tmp100 _tmp101 = tl.where(rmask & xmask, tmp102, _tmp101) tmp105 = tmp104 - tmp4 tmp106 = tl_math.exp(tmp105) tmp107 = tmp106 / tmp7 tmp108 = tmp103 * tmp107 tmp109 = tl.broadcast_to(tmp108, [XBLOCK, RBLOCK]) tmp111 = _tmp110 + tmp109 _tmp110 = tl.where(rmask & xmask, tmp111, _tmp110) tmp114 = tmp113 - tmp4 tmp115 = tl_math.exp(tmp114) tmp116 = tmp115 / tmp7 tmp117 = tmp112 * tmp116 tmp118 = tl.broadcast_to(tmp117, [XBLOCK, RBLOCK]) tmp120 = _tmp119 + tmp118 _tmp119 = tl.where(rmask & xmask, tmp120, _tmp119) tmp123 = tmp122 - tmp4 tmp124 = tl_math.exp(tmp123) tmp125 = tmp124 / tmp7 tmp126 = tmp121 * tmp125 tmp127 = tl.broadcast_to(tmp126, [XBLOCK, RBLOCK]) tmp129 = _tmp128 + tmp127 _tmp128 = tl.where(rmask & xmask, tmp129, _tmp128) tmp132 = tmp131 - tmp4 tmp133 = tl_math.exp(tmp132) tmp134 = tmp133 / tmp7 tmp135 = tmp130 * tmp134 tmp136 = tl.broadcast_to(tmp135, [XBLOCK, RBLOCK]) tmp138 = _tmp137 + tmp136 _tmp137 = tl.where(rmask & xmask, tmp138, _tmp137) tmp141 = tmp140 - tmp4 tmp142 = tl_math.exp(tmp141) tmp143 = tmp142 / tmp7 tmp144 = tmp139 * tmp143 tmp145 = tl.broadcast_to(tmp144, [XBLOCK, RBLOCK]) tmp147 = _tmp146 + tmp145 _tmp146 = tl.where(rmask & xmask, tmp147, _tmp146) tmp150 = tmp149 - tmp4 tmp151 = tl_math.exp(tmp150) tmp152 = tmp151 / tmp7 tmp153 = tmp148 * tmp152 tmp154 = tl.broadcast_to(tmp153, [XBLOCK, RBLOCK]) tmp156 = _tmp155 + tmp154 _tmp155 = tl.where(rmask & xmask, tmp156, _tmp155) tmp159 = tmp158 - tmp4 tmp160 = tl_math.exp(tmp159) tmp161 = tmp160 / tmp7 tmp162 = tmp157 * tmp161 tmp163 = tl.broadcast_to(tmp162, [XBLOCK, RBLOCK]) tmp165 = _tmp164 + tmp163 _tmp164 = tl.where(rmask & xmask, tmp165, _tmp164) tmp168 = tmp167 - tmp4 tmp169 = tl_math.exp(tmp168) tmp170 = tmp169 / tmp7 tmp171 = tmp166 * tmp170 tmp172 = tl.broadcast_to(tmp171, [XBLOCK, RBLOCK]) tmp174 = _tmp173 + tmp172 _tmp173 = tl.where(rmask & xmask, tmp174, _tmp173) tmp177 = tmp176 - tmp4 tmp178 = tl_math.exp(tmp177) tmp179 = tmp178 / tmp7 tmp180 = tmp175 * tmp179 tmp181 = tl.broadcast_to(tmp180, [XBLOCK, RBLOCK]) tmp183 = _tmp182 + tmp181 _tmp182 = tl.where(rmask & xmask, tmp183, _tmp182) tmp186 = tmp185 - tmp4 tmp187 = tl_math.exp(tmp186) tmp188 = tmp187 / tmp7 tmp189 = tmp184 * tmp188 tmp190 = tl.broadcast_to(tmp189, [XBLOCK, RBLOCK]) tmp192 = _tmp191 + tmp190 _tmp191 = tl.where(rmask & xmask, tmp192, _tmp191) tmp195 = tmp194 - tmp4 tmp196 = tl_math.exp(tmp195) tmp197 = tmp196 / tmp7 tmp198 = tmp193 * tmp197 tmp199 = tl.broadcast_to(tmp198, [XBLOCK, RBLOCK]) tmp201 = _tmp200 + tmp199 _tmp200 = tl.where(rmask & xmask, tmp201, _tmp200) tmp204 = tmp203 - tmp4 tmp205 = tl_math.exp(tmp204) tmp206 = tmp205 / tmp7 tmp207 = tmp202 * tmp206 tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK]) tmp210 = _tmp209 + tmp208 _tmp209 = tl.where(rmask & xmask, tmp210, _tmp209) tmp213 = tmp212 - tmp4 tmp214 = tl_math.exp(tmp213) tmp215 = tmp214 / tmp7 tmp216 = tmp211 * tmp215 tmp217 = tl.broadcast_to(tmp216, [XBLOCK, RBLOCK]) tmp219 = _tmp218 + tmp217 _tmp218 = tl.where(rmask & xmask, tmp219, _tmp218) tmp222 = tmp221 - tmp4 tmp223 = tl_math.exp(tmp222) tmp224 = tmp223 / tmp7 tmp225 = tmp220 * tmp224 tmp226 = tl.broadcast_to(tmp225, [XBLOCK, RBLOCK]) tmp228 = _tmp227 + tmp226 _tmp227 = tl.where(rmask & xmask, tmp228, _tmp227) tmp231 = tmp230 - tmp4 tmp232 = 
tl_math.exp(tmp231) tmp233 = tmp232 / tmp7 tmp234 = tmp229 * tmp233 tmp235 = tl.broadcast_to(tmp234, [XBLOCK, RBLOCK]) tmp237 = _tmp236 + tmp235 _tmp236 = tl.where(rmask & xmask, tmp237, _tmp236) tmp240 = tmp239 - tmp4 tmp241 = tl_math.exp(tmp240) tmp242 = tmp241 / tmp7 tmp243 = tmp238 * tmp242 tmp244 = tl.broadcast_to(tmp243, [XBLOCK, RBLOCK]) tmp246 = _tmp245 + tmp244 _tmp245 = tl.where(rmask & xmask, tmp246, _tmp245) tmp249 = tmp248 - tmp4 tmp250 = tl_math.exp(tmp249) tmp251 = tmp250 / tmp7 tmp252 = tmp247 * tmp251 tmp253 = tl.broadcast_to(tmp252, [XBLOCK, RBLOCK]) tmp255 = _tmp254 + tmp253 _tmp254 = tl.where(rmask & xmask, tmp255, _tmp254) tmp258 = tmp257 - tmp4 tmp259 = tl_math.exp(tmp258) tmp260 = tmp259 / tmp7 tmp261 = tmp256 * tmp260 tmp262 = tl.broadcast_to(tmp261, [XBLOCK, RBLOCK]) tmp264 = _tmp263 + tmp262 _tmp263 = tl.where(rmask & xmask, tmp264, _tmp263) tmp11 = tl.sum(_tmp11, 1)[:, None] tl.store(out_ptr0 + (x3), tmp11, xmask) tmp20 = tl.sum(_tmp20, 1)[:, None] tl.store(out_ptr1 + (x3), tmp20, xmask) tmp29 = tl.sum(_tmp29, 1)[:, None] tl.store(out_ptr2 + (x3), tmp29, xmask) tmp38 = tl.sum(_tmp38, 1)[:, None] tl.store(out_ptr3 + (x3), tmp38, xmask) tmp47 = tl.sum(_tmp47, 1)[:, None] tl.store(out_ptr4 + (x3), tmp47, xmask) tmp56 = tl.sum(_tmp56, 1)[:, None] tl.store(out_ptr5 + (x3), tmp56, xmask) tmp65 = tl.sum(_tmp65, 1)[:, None] tl.store(out_ptr6 + (x3), tmp65, xmask) tmp74 = tl.sum(_tmp74, 1)[:, None] tl.store(out_ptr7 + (x3), tmp74, xmask) tmp83 = tl.sum(_tmp83, 1)[:, None] tl.store(out_ptr8 + (x3), tmp83, xmask) tmp92 = tl.sum(_tmp92, 1)[:, None] tl.store(out_ptr9 + (x3), tmp92, xmask) tmp101 = tl.sum(_tmp101, 1)[:, None] tl.store(out_ptr10 + (x3), tmp101, xmask) tmp110 = tl.sum(_tmp110, 1)[:, None] tl.store(out_ptr11 + (x3), tmp110, xmask) tmp119 = tl.sum(_tmp119, 1)[:, None] tl.store(out_ptr12 + (x3), tmp119, xmask) tmp128 = tl.sum(_tmp128, 1)[:, None] tl.store(out_ptr13 + (x3), tmp128, xmask) tmp137 = tl.sum(_tmp137, 1)[:, None] tl.store(out_ptr14 + (x3), tmp137, xmask) tmp146 = tl.sum(_tmp146, 1)[:, None] tl.store(out_ptr15 + (x3), tmp146, xmask) tmp155 = tl.sum(_tmp155, 1)[:, None] tl.store(out_ptr16 + (x3), tmp155, xmask) tmp164 = tl.sum(_tmp164, 1)[:, None] tl.store(out_ptr17 + (x3), tmp164, xmask) tmp173 = tl.sum(_tmp173, 1)[:, None] tl.store(out_ptr18 + (x3), tmp173, xmask) tmp182 = tl.sum(_tmp182, 1)[:, None] tl.store(out_ptr19 + (x3), tmp182, xmask) tmp191 = tl.sum(_tmp191, 1)[:, None] tl.store(out_ptr20 + (x3), tmp191, xmask) tmp200 = tl.sum(_tmp200, 1)[:, None] tl.store(out_ptr21 + (x3), tmp200, xmask) tmp209 = tl.sum(_tmp209, 1)[:, None] tl.store(out_ptr22 + (x3), tmp209, xmask) tmp218 = tl.sum(_tmp218, 1)[:, None] tl.store(out_ptr23 + (x3), tmp218, xmask) tmp227 = tl.sum(_tmp227, 1)[:, None] tl.store(out_ptr24 + (x3), tmp227, xmask) tmp236 = tl.sum(_tmp236, 1)[:, None] tl.store(out_ptr25 + (x3), tmp236, xmask) tmp245 = tl.sum(_tmp245, 1)[:, None] tl.store(out_ptr26 + (x3), tmp245, xmask) tmp254 = tl.sum(_tmp254, 1)[:, None] tl.store(out_ptr27 + (x3), tmp254, xmask) tmp263 = tl.sum(_tmp263, 1)[:, None] tl.store(out_ptr28 + (x3), tmp263, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/y2/cy2iowezi666txr2ot26oju5hlurnwvyizjcwcevdg7sddbv4msq.py # Topologically Sorted Source Nodes: [residual_59, sum_30, residual_61, sum_31, residual_63, sum_32, residual_65, sum_33, residual_67, sum_34, residual_69, sum_35, residual_71, sum_36, residual_73, sum_37, residual_75, sum_38, residual_77, sum_39, residual_79, sum_40, residual_81, sum_41, 
residual_83, sum_42, residual_85, sum_43, residual_87, sum_44, residual_89, sum_45, residual_91, sum_46, residual_93, sum_47, residual_95, sum_48, residual_97, sum_49, residual_99, sum_50, residual_101, sum_51, residual_103, sum_52, residual_105, sum_53, residual_107, sum_54, residual_109, sum_55, residual_111, sum_56, residual_113, sum_57], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # residual_101 => mul_50 # residual_103 => mul_51 # residual_105 => mul_52 # residual_107 => mul_53 # residual_109 => mul_54 # residual_111 => mul_55 # residual_113 => mul_56 # residual_59 => mul_29 # residual_61 => mul_30 # residual_63 => mul_31 # residual_65 => mul_32 # residual_67 => mul_33 # residual_69 => mul_34 # residual_71 => mul_35 # residual_73 => mul_36 # residual_75 => mul_37 # residual_77 => mul_38 # residual_79 => mul_39 # residual_81 => mul_40 # residual_83 => mul_41 # residual_85 => mul_42 # residual_87 => mul_43 # residual_89 => mul_44 # residual_91 => mul_45 # residual_93 => mul_46 # residual_95 => mul_47 # residual_97 => mul_48 # residual_99 => mul_49 # sum_30 => sum_32 # sum_31 => sum_33 # sum_32 => sum_34 # sum_33 => sum_35 # sum_34 => sum_36 # sum_35 => sum_37 # sum_36 => sum_38 # sum_37 => sum_39 # sum_38 => sum_40 # sum_39 => sum_41 # sum_40 => sum_42 # sum_41 => sum_43 # sum_42 => sum_44 # sum_43 => sum_45 # sum_44 => sum_46 # sum_45 => sum_47 # sum_46 => sum_48 # sum_47 => sum_49 # sum_48 => sum_50 # sum_49 => sum_51 # sum_50 => sum_52 # sum_51 => sum_53 # sum_52 => sum_54 # sum_53 => sum_55 # sum_54 => sum_56 # sum_55 => sum_57 # sum_56 => sum_58 # sum_57 => sum_59 # Graph fragment: # %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_30, %unsqueeze_89), kwargs = {}) # %sum_32 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_29, [-1]), kwargs = {}) # %mul_30 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_31, %unsqueeze_92), kwargs = {}) # %sum_33 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_30, [-1]), kwargs = {}) # %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_32, %unsqueeze_95), kwargs = {}) # %sum_34 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_31, [-1]), kwargs = {}) # %mul_32 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_33, %unsqueeze_98), kwargs = {}) # %sum_35 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_32, [-1]), kwargs = {}) # %mul_33 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_34, %unsqueeze_101), kwargs = {}) # %sum_36 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_33, [-1]), kwargs = {}) # %mul_34 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_35, %unsqueeze_104), kwargs = {}) # %sum_37 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_34, [-1]), kwargs = {}) # %mul_35 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_36, %unsqueeze_107), kwargs = {}) # %sum_38 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_35, [-1]), kwargs = {}) # %mul_36 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_37, %unsqueeze_110), kwargs = {}) # %sum_39 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_36, 
[-1]), kwargs = {}) # %mul_37 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_38, %unsqueeze_113), kwargs = {}) # %sum_40 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_37, [-1]), kwargs = {}) # %mul_38 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_39, %unsqueeze_116), kwargs = {}) # %sum_41 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_38, [-1]), kwargs = {}) # %mul_39 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_40, %unsqueeze_119), kwargs = {}) # %sum_42 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_39, [-1]), kwargs = {}) # %mul_40 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_41, %unsqueeze_122), kwargs = {}) # %sum_43 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_40, [-1]), kwargs = {}) # %mul_41 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_42, %unsqueeze_125), kwargs = {}) # %sum_44 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_41, [-1]), kwargs = {}) # %mul_42 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_43, %unsqueeze_128), kwargs = {}) # %sum_45 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_42, [-1]), kwargs = {}) # %mul_43 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_44, %unsqueeze_131), kwargs = {}) # %sum_46 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_43, [-1]), kwargs = {}) # %mul_44 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_45, %unsqueeze_134), kwargs = {}) # %sum_47 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_44, [-1]), kwargs = {}) # %mul_45 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_46, %unsqueeze_137), kwargs = {}) # %sum_48 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_45, [-1]), kwargs = {}) # %mul_46 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_47, %unsqueeze_140), kwargs = {}) # %sum_49 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_46, [-1]), kwargs = {}) # %mul_47 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_48, %unsqueeze_143), kwargs = {}) # %sum_50 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_47, [-1]), kwargs = {}) # %mul_48 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_49, %unsqueeze_146), kwargs = {}) # %sum_51 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_48, [-1]), kwargs = {}) # %mul_49 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_50, %unsqueeze_149), kwargs = {}) # %sum_52 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_49, [-1]), kwargs = {}) # %mul_50 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_51, %unsqueeze_152), kwargs = {}) # %sum_53 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_50, [-1]), kwargs = {}) # %mul_51 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_52, %unsqueeze_155), kwargs = {}) # %sum_54 : [num_users=1] = 
call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_51, [-1]), kwargs = {}) # %mul_52 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_53, %unsqueeze_158), kwargs = {}) # %sum_55 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_52, [-1]), kwargs = {}) # %mul_53 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_54, %unsqueeze_161), kwargs = {}) # %sum_56 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_53, [-1]), kwargs = {}) # %mul_54 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_55, %unsqueeze_164), kwargs = {}) # %sum_57 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_54, [-1]), kwargs = {}) # %mul_55 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_56, %unsqueeze_167), kwargs = {}) # %sum_58 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_55, [-1]), kwargs = {}) # %mul_56 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_57, %unsqueeze_170), kwargs = {}) # %sum_59 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_56, [-1]), kwargs = {}) triton_red_fused_mul_sum_4 = async_compile.triton('triton_red_fused_mul_sum_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[512, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: '*fp32', 34: '*fp32', 35: '*fp32', 36: '*fp32', 37: '*fp32', 38: '*fp32', 39: '*fp32', 40: '*fp32', 41: '*fp32', 42: '*fp32', 43: '*fp32', 44: '*fp32', 45: '*fp32', 46: '*fp32', 47: '*fp32', 48: '*fp32', 49: '*fp32', 50: '*fp32', 51: '*fp32', 52: '*fp32', 53: '*fp32', 54: '*fp32', 55: '*fp32', 56: '*fp32', 57: '*fp32', 58: '*fp32', 59: 'i32', 60: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mul_sum_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 58, 'num_reduction': 28, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_mul_sum_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 512 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x1 = (xindex // 128) _tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp72 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp81 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp90 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp99 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp108 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp117 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp126 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp135 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp144 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp153 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp162 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp171 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp180 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp189 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp198 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp207 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp216 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp225 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp234 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp243 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp252 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr1 + (118784 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr2 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tl.load(in_ptr3 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr4 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp12 = tl.load(in_ptr1 + (122880 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr5 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp21 = tl.load(in_ptr1 + (126976 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tl.load(in_ptr6 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', 
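# NOTE (assumption): in_ptr0 and in_ptr4..in_ptr30 are the per-cluster
# residual tensors, laid out (4, 128, 4096) and read exactly once per element
# (evict_first); in_ptr1 packs all 64 cluster scores per batch
# (262144 = 64 * 4096), while in_ptr2 / in_ptr3 hold the per-position softmax
# running max and denominator, both re-read across the 128 dim rows
# (evict_last).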
other=0.0) tmp30 = tl.load(in_ptr1 + (131072 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp38 = tl.load(in_ptr7 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp39 = tl.load(in_ptr1 + (135168 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tl.load(in_ptr8 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp48 = tl.load(in_ptr1 + (139264 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp56 = tl.load(in_ptr9 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp57 = tl.load(in_ptr1 + (143360 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp65 = tl.load(in_ptr10 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp66 = tl.load(in_ptr1 + (147456 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp74 = tl.load(in_ptr11 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp75 = tl.load(in_ptr1 + (151552 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp83 = tl.load(in_ptr12 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp84 = tl.load(in_ptr1 + (155648 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp92 = tl.load(in_ptr13 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp93 = tl.load(in_ptr1 + (159744 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp101 = tl.load(in_ptr14 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp102 = tl.load(in_ptr1 + (163840 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp110 = tl.load(in_ptr15 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp111 = tl.load(in_ptr1 + (167936 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp119 = tl.load(in_ptr16 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp120 = tl.load(in_ptr1 + (172032 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp128 = tl.load(in_ptr17 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp129 = tl.load(in_ptr1 + (176128 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp137 = tl.load(in_ptr18 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp138 = tl.load(in_ptr1 + (180224 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp146 = tl.load(in_ptr19 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp147 = tl.load(in_ptr1 + (184320 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp155 = tl.load(in_ptr20 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp156 = tl.load(in_ptr1 + (188416 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp164 = tl.load(in_ptr21 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp165 = tl.load(in_ptr1 + (192512 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp173 = tl.load(in_ptr22 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp174 = tl.load(in_ptr1 + (196608 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp182 = 
tl.load(in_ptr23 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp183 = tl.load(in_ptr1 + (200704 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp191 = tl.load(in_ptr24 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp192 = tl.load(in_ptr1 + (204800 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp200 = tl.load(in_ptr25 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp201 = tl.load(in_ptr1 + (208896 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp209 = tl.load(in_ptr26 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp210 = tl.load(in_ptr1 + (212992 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp218 = tl.load(in_ptr27 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp219 = tl.load(in_ptr1 + (217088 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp227 = tl.load(in_ptr28 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp228 = tl.load(in_ptr1 + (221184 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp236 = tl.load(in_ptr29 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp237 = tl.load(in_ptr1 + (225280 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp245 = tl.load(in_ptr30 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp246 = tl.load(in_ptr1 + (229376 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp3 = tmp1 - tmp2 tmp4 = tl_math.exp(tmp3) tmp6 = tmp4 / tmp5 tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = _tmp9 + tmp8 _tmp9 = tl.where(rmask & xmask, tmp10, _tmp9) tmp13 = tmp12 - tmp2 tmp14 = tl_math.exp(tmp13) tmp15 = tmp14 / tmp5 tmp16 = tmp11 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = _tmp18 + tmp17 _tmp18 = tl.where(rmask & xmask, tmp19, _tmp18) tmp22 = tmp21 - tmp2 tmp23 = tl_math.exp(tmp22) tmp24 = tmp23 / tmp5 tmp25 = tmp20 * tmp24 tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = _tmp27 + tmp26 _tmp27 = tl.where(rmask & xmask, tmp28, _tmp27) tmp31 = tmp30 - tmp2 tmp32 = tl_math.exp(tmp31) tmp33 = tmp32 / tmp5 tmp34 = tmp29 * tmp33 tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = _tmp36 + tmp35 _tmp36 = tl.where(rmask & xmask, tmp37, _tmp36) tmp40 = tmp39 - tmp2 tmp41 = tl_math.exp(tmp40) tmp42 = tmp41 / tmp5 tmp43 = tmp38 * tmp42 tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK]) tmp46 = _tmp45 + tmp44 _tmp45 = tl.where(rmask & xmask, tmp46, _tmp45) tmp49 = tmp48 - tmp2 tmp50 = tl_math.exp(tmp49) tmp51 = tmp50 / tmp5 tmp52 = tmp47 * tmp51 tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK]) tmp55 = _tmp54 + tmp53 _tmp54 = tl.where(rmask & xmask, tmp55, _tmp54) tmp58 = tmp57 - tmp2 tmp59 = tl_math.exp(tmp58) tmp60 = tmp59 / tmp5 tmp61 = tmp56 * tmp60 tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK]) tmp64 = _tmp63 + tmp62 _tmp63 = tl.where(rmask & xmask, tmp64, _tmp63) tmp67 = tmp66 - tmp2 tmp68 = tl_math.exp(tmp67) tmp69 = tmp68 / tmp5 tmp70 = tmp65 * tmp69 tmp71 = tl.broadcast_to(tmp70, [XBLOCK, RBLOCK]) tmp73 = _tmp72 + tmp71 _tmp72 = tl.where(rmask & xmask, tmp73, _tmp72) tmp76 = tmp75 - tmp2 tmp77 = tl_math.exp(tmp76) tmp78 = tmp77 / tmp5 tmp79 = tmp74 * tmp78 tmp80 = tl.broadcast_to(tmp79, [XBLOCK, RBLOCK]) tmp82 = _tmp81 + tmp80 _tmp81 
= tl.where(rmask & xmask, tmp82, _tmp81) tmp85 = tmp84 - tmp2 tmp86 = tl_math.exp(tmp85) tmp87 = tmp86 / tmp5 tmp88 = tmp83 * tmp87 tmp89 = tl.broadcast_to(tmp88, [XBLOCK, RBLOCK]) tmp91 = _tmp90 + tmp89 _tmp90 = tl.where(rmask & xmask, tmp91, _tmp90) tmp94 = tmp93 - tmp2 tmp95 = tl_math.exp(tmp94) tmp96 = tmp95 / tmp5 tmp97 = tmp92 * tmp96 tmp98 = tl.broadcast_to(tmp97, [XBLOCK, RBLOCK]) tmp100 = _tmp99 + tmp98 _tmp99 = tl.where(rmask & xmask, tmp100, _tmp99) tmp103 = tmp102 - tmp2 tmp104 = tl_math.exp(tmp103) tmp105 = tmp104 / tmp5 tmp106 = tmp101 * tmp105 tmp107 = tl.broadcast_to(tmp106, [XBLOCK, RBLOCK]) tmp109 = _tmp108 + tmp107 _tmp108 = tl.where(rmask & xmask, tmp109, _tmp108) tmp112 = tmp111 - tmp2 tmp113 = tl_math.exp(tmp112) tmp114 = tmp113 / tmp5 tmp115 = tmp110 * tmp114 tmp116 = tl.broadcast_to(tmp115, [XBLOCK, RBLOCK]) tmp118 = _tmp117 + tmp116 _tmp117 = tl.where(rmask & xmask, tmp118, _tmp117) tmp121 = tmp120 - tmp2 tmp122 = tl_math.exp(tmp121) tmp123 = tmp122 / tmp5 tmp124 = tmp119 * tmp123 tmp125 = tl.broadcast_to(tmp124, [XBLOCK, RBLOCK]) tmp127 = _tmp126 + tmp125 _tmp126 = tl.where(rmask & xmask, tmp127, _tmp126) tmp130 = tmp129 - tmp2 tmp131 = tl_math.exp(tmp130) tmp132 = tmp131 / tmp5 tmp133 = tmp128 * tmp132 tmp134 = tl.broadcast_to(tmp133, [XBLOCK, RBLOCK]) tmp136 = _tmp135 + tmp134 _tmp135 = tl.where(rmask & xmask, tmp136, _tmp135) tmp139 = tmp138 - tmp2 tmp140 = tl_math.exp(tmp139) tmp141 = tmp140 / tmp5 tmp142 = tmp137 * tmp141 tmp143 = tl.broadcast_to(tmp142, [XBLOCK, RBLOCK]) tmp145 = _tmp144 + tmp143 _tmp144 = tl.where(rmask & xmask, tmp145, _tmp144) tmp148 = tmp147 - tmp2 tmp149 = tl_math.exp(tmp148) tmp150 = tmp149 / tmp5 tmp151 = tmp146 * tmp150 tmp152 = tl.broadcast_to(tmp151, [XBLOCK, RBLOCK]) tmp154 = _tmp153 + tmp152 _tmp153 = tl.where(rmask & xmask, tmp154, _tmp153) tmp157 = tmp156 - tmp2 tmp158 = tl_math.exp(tmp157) tmp159 = tmp158 / tmp5 tmp160 = tmp155 * tmp159 tmp161 = tl.broadcast_to(tmp160, [XBLOCK, RBLOCK]) tmp163 = _tmp162 + tmp161 _tmp162 = tl.where(rmask & xmask, tmp163, _tmp162) tmp166 = tmp165 - tmp2 tmp167 = tl_math.exp(tmp166) tmp168 = tmp167 / tmp5 tmp169 = tmp164 * tmp168 tmp170 = tl.broadcast_to(tmp169, [XBLOCK, RBLOCK]) tmp172 = _tmp171 + tmp170 _tmp171 = tl.where(rmask & xmask, tmp172, _tmp171) tmp175 = tmp174 - tmp2 tmp176 = tl_math.exp(tmp175) tmp177 = tmp176 / tmp5 tmp178 = tmp173 * tmp177 tmp179 = tl.broadcast_to(tmp178, [XBLOCK, RBLOCK]) tmp181 = _tmp180 + tmp179 _tmp180 = tl.where(rmask & xmask, tmp181, _tmp180) tmp184 = tmp183 - tmp2 tmp185 = tl_math.exp(tmp184) tmp186 = tmp185 / tmp5 tmp187 = tmp182 * tmp186 tmp188 = tl.broadcast_to(tmp187, [XBLOCK, RBLOCK]) tmp190 = _tmp189 + tmp188 _tmp189 = tl.where(rmask & xmask, tmp190, _tmp189) tmp193 = tmp192 - tmp2 tmp194 = tl_math.exp(tmp193) tmp195 = tmp194 / tmp5 tmp196 = tmp191 * tmp195 tmp197 = tl.broadcast_to(tmp196, [XBLOCK, RBLOCK]) tmp199 = _tmp198 + tmp197 _tmp198 = tl.where(rmask & xmask, tmp199, _tmp198) tmp202 = tmp201 - tmp2 tmp203 = tl_math.exp(tmp202) tmp204 = tmp203 / tmp5 tmp205 = tmp200 * tmp204 tmp206 = tl.broadcast_to(tmp205, [XBLOCK, RBLOCK]) tmp208 = _tmp207 + tmp206 _tmp207 = tl.where(rmask & xmask, tmp208, _tmp207) tmp211 = tmp210 - tmp2 tmp212 = tl_math.exp(tmp211) tmp213 = tmp212 / tmp5 tmp214 = tmp209 * tmp213 tmp215 = tl.broadcast_to(tmp214, [XBLOCK, RBLOCK]) tmp217 = _tmp216 + tmp215 _tmp216 = tl.where(rmask & xmask, tmp217, _tmp216) tmp220 = tmp219 - tmp2 tmp221 = tl_math.exp(tmp220) tmp222 = tmp221 / tmp5 tmp223 = tmp218 * tmp222 tmp224 = 
tl.broadcast_to(tmp223, [XBLOCK, RBLOCK]) tmp226 = _tmp225 + tmp224 _tmp225 = tl.where(rmask & xmask, tmp226, _tmp225) tmp229 = tmp228 - tmp2 tmp230 = tl_math.exp(tmp229) tmp231 = tmp230 / tmp5 tmp232 = tmp227 * tmp231 tmp233 = tl.broadcast_to(tmp232, [XBLOCK, RBLOCK]) tmp235 = _tmp234 + tmp233 _tmp234 = tl.where(rmask & xmask, tmp235, _tmp234) tmp238 = tmp237 - tmp2 tmp239 = tl_math.exp(tmp238) tmp240 = tmp239 / tmp5 tmp241 = tmp236 * tmp240 tmp242 = tl.broadcast_to(tmp241, [XBLOCK, RBLOCK]) tmp244 = _tmp243 + tmp242 _tmp243 = tl.where(rmask & xmask, tmp244, _tmp243) tmp247 = tmp246 - tmp2 tmp248 = tl_math.exp(tmp247) tmp249 = tmp248 / tmp5 tmp250 = tmp245 * tmp249 tmp251 = tl.broadcast_to(tmp250, [XBLOCK, RBLOCK]) tmp253 = _tmp252 + tmp251 _tmp252 = tl.where(rmask & xmask, tmp253, _tmp252) tmp9 = tl.sum(_tmp9, 1)[:, None] tl.store(out_ptr0 + (x3), tmp9, xmask) tmp18 = tl.sum(_tmp18, 1)[:, None] tl.store(out_ptr1 + (x3), tmp18, xmask) tmp27 = tl.sum(_tmp27, 1)[:, None] tl.store(out_ptr2 + (x3), tmp27, xmask) tmp36 = tl.sum(_tmp36, 1)[:, None] tl.store(out_ptr3 + (x3), tmp36, xmask) tmp45 = tl.sum(_tmp45, 1)[:, None] tl.store(out_ptr4 + (x3), tmp45, xmask) tmp54 = tl.sum(_tmp54, 1)[:, None] tl.store(out_ptr5 + (x3), tmp54, xmask) tmp63 = tl.sum(_tmp63, 1)[:, None] tl.store(out_ptr6 + (x3), tmp63, xmask) tmp72 = tl.sum(_tmp72, 1)[:, None] tl.store(out_ptr7 + (x3), tmp72, xmask) tmp81 = tl.sum(_tmp81, 1)[:, None] tl.store(out_ptr8 + (x3), tmp81, xmask) tmp90 = tl.sum(_tmp90, 1)[:, None] tl.store(out_ptr9 + (x3), tmp90, xmask) tmp99 = tl.sum(_tmp99, 1)[:, None] tl.store(out_ptr10 + (x3), tmp99, xmask) tmp108 = tl.sum(_tmp108, 1)[:, None] tl.store(out_ptr11 + (x3), tmp108, xmask) tmp117 = tl.sum(_tmp117, 1)[:, None] tl.store(out_ptr12 + (x3), tmp117, xmask) tmp126 = tl.sum(_tmp126, 1)[:, None] tl.store(out_ptr13 + (x3), tmp126, xmask) tmp135 = tl.sum(_tmp135, 1)[:, None] tl.store(out_ptr14 + (x3), tmp135, xmask) tmp144 = tl.sum(_tmp144, 1)[:, None] tl.store(out_ptr15 + (x3), tmp144, xmask) tmp153 = tl.sum(_tmp153, 1)[:, None] tl.store(out_ptr16 + (x3), tmp153, xmask) tmp162 = tl.sum(_tmp162, 1)[:, None] tl.store(out_ptr17 + (x3), tmp162, xmask) tmp171 = tl.sum(_tmp171, 1)[:, None] tl.store(out_ptr18 + (x3), tmp171, xmask) tmp180 = tl.sum(_tmp180, 1)[:, None] tl.store(out_ptr19 + (x3), tmp180, xmask) tmp189 = tl.sum(_tmp189, 1)[:, None] tl.store(out_ptr20 + (x3), tmp189, xmask) tmp198 = tl.sum(_tmp198, 1)[:, None] tl.store(out_ptr21 + (x3), tmp198, xmask) tmp207 = tl.sum(_tmp207, 1)[:, None] tl.store(out_ptr22 + (x3), tmp207, xmask) tmp216 = tl.sum(_tmp216, 1)[:, None] tl.store(out_ptr23 + (x3), tmp216, xmask) tmp225 = tl.sum(_tmp225, 1)[:, None] tl.store(out_ptr24 + (x3), tmp225, xmask) tmp234 = tl.sum(_tmp234, 1)[:, None] tl.store(out_ptr25 + (x3), tmp234, xmask) tmp243 = tl.sum(_tmp243, 1)[:, None] tl.store(out_ptr26 + (x3), tmp243, xmask) tmp252 = tl.sum(_tmp252, 1)[:, None] tl.store(out_ptr27 + (x3), tmp252, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/2y/c2ycvmz55d2cdgshqzt2ylgrhl3hwv23izoty2oj53exxztlcefi.py # Topologically Sorted Source Nodes: [residual_115, sum_58, residual_117, sum_59, residual_119, sum_60, residual_121, sum_61, residual_123, sum_62, residual_125, sum_63, residual_127, sum_64], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # residual_115 => mul_57 # residual_117 => mul_58 # residual_119 => mul_59 # residual_121 => mul_60 # residual_123 => mul_61 # residual_125 => mul_62 # residual_127 => mul_63 # sum_58 => 
sum_60 # sum_59 => sum_61 # sum_60 => sum_62 # sum_61 => sum_63 # sum_62 => sum_64 # sum_63 => sum_65 # sum_64 => sum_66 # Graph fragment: # %mul_57 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_58, %unsqueeze_173), kwargs = {}) # %sum_60 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_57, [-1]), kwargs = {}) # %mul_58 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_59, %unsqueeze_176), kwargs = {}) # %sum_61 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_58, [-1]), kwargs = {}) # %mul_59 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_60, %unsqueeze_179), kwargs = {}) # %sum_62 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_59, [-1]), kwargs = {}) # %mul_60 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_61, %unsqueeze_182), kwargs = {}) # %sum_63 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_60, [-1]), kwargs = {}) # %mul_61 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_62, %unsqueeze_185), kwargs = {}) # %sum_64 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_61, [-1]), kwargs = {}) # %mul_62 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_63, %unsqueeze_188), kwargs = {}) # %sum_65 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_62, [-1]), kwargs = {}) # %mul_63 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_64, %unsqueeze_191), kwargs = {}) # %sum_66 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_63, [-1]), kwargs = {}) triton_red_fused_mul_sum_5 = async_compile.triton('triton_red_fused_mul_sum_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[512, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: 'i32', 18: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mul_sum_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 7, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
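# NOTE (assumption): this is the same fused pattern as
# triton_red_fused_mul_sum_4 above: a numerically stable softmax weight,
# exp(score - max) / denom, scales each residual and is summed over the 4096
# positions, i.e. sum_p a_k(p) * (x(p) - c_k). Here it covers the remaining
# 7 clusters 57..63 (first score offset 233472 = 57 * 4096), hence 7
# accumulators instead of 28.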
'store_cubin': False} ) @triton.jit def triton_red_fused_mul_sum_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 512 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x1 = (xindex // 128) _tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr1 + (233472 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr2 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tl.load(in_ptr3 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr4 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp12 = tl.load(in_ptr1 + (237568 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr5 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp21 = tl.load(in_ptr1 + (241664 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tl.load(in_ptr6 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp30 = tl.load(in_ptr1 + (245760 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp38 = tl.load(in_ptr7 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp39 = tl.load(in_ptr1 + (249856 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tl.load(in_ptr8 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp48 = tl.load(in_ptr1 + (253952 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp56 = tl.load(in_ptr9 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp57 = tl.load(in_ptr1 + (258048 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp3 = tmp1 - tmp2 tmp4 = tl_math.exp(tmp3) tmp6 = tmp4 / tmp5 tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = _tmp9 + tmp8 _tmp9 = tl.where(rmask & xmask, tmp10, _tmp9) tmp13 = tmp12 - tmp2 tmp14 = tl_math.exp(tmp13) tmp15 = tmp14 / tmp5 tmp16 = tmp11 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = _tmp18 + tmp17 _tmp18 = tl.where(rmask & xmask, tmp19, _tmp18) tmp22 = tmp21 - tmp2 tmp23 = tl_math.exp(tmp22) tmp24 = tmp23 / tmp5 tmp25 = tmp20 * tmp24 tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = _tmp27 + tmp26 _tmp27 = tl.where(rmask & xmask, tmp28, _tmp27) tmp31 = tmp30 - tmp2 tmp32 = tl_math.exp(tmp31) tmp33 = tmp32 / tmp5 tmp34 = tmp29 * tmp33 tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = _tmp36 + tmp35 _tmp36 = tl.where(rmask & xmask, tmp37, _tmp36) tmp40 = tmp39 - tmp2 tmp41 = tl_math.exp(tmp40) tmp42 = tmp41 / tmp5 tmp43 = tmp38 * tmp42 tmp44 
= tl.broadcast_to(tmp43, [XBLOCK, RBLOCK]) tmp46 = _tmp45 + tmp44 _tmp45 = tl.where(rmask & xmask, tmp46, _tmp45) tmp49 = tmp48 - tmp2 tmp50 = tl_math.exp(tmp49) tmp51 = tmp50 / tmp5 tmp52 = tmp47 * tmp51 tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK]) tmp55 = _tmp54 + tmp53 _tmp54 = tl.where(rmask & xmask, tmp55, _tmp54) tmp58 = tmp57 - tmp2 tmp59 = tl_math.exp(tmp58) tmp60 = tmp59 / tmp5 tmp61 = tmp56 * tmp60 tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK]) tmp64 = _tmp63 + tmp62 _tmp63 = tl.where(rmask & xmask, tmp64, _tmp63) tmp9 = tl.sum(_tmp9, 1)[:, None] tl.store(out_ptr0 + (x3), tmp9, xmask) tmp18 = tl.sum(_tmp18, 1)[:, None] tl.store(out_ptr1 + (x3), tmp18, xmask) tmp27 = tl.sum(_tmp27, 1)[:, None] tl.store(out_ptr2 + (x3), tmp27, xmask) tmp36 = tl.sum(_tmp36, 1)[:, None] tl.store(out_ptr3 + (x3), tmp36, xmask) tmp45 = tl.sum(_tmp45, 1)[:, None] tl.store(out_ptr4 + (x3), tmp45, xmask) tmp54 = tl.sum(_tmp54, 1)[:, None] tl.store(out_ptr5 + (x3), tmp54, xmask) tmp63 = tl.sum(_tmp63, 1)[:, None] tl.store(out_ptr6 + (x3), tmp63, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/vs/cvsgt2p572puh5ryhxl6bcviuizky5j4dgt53l7wfwlf2juihd4a.py # Topologically Sorted Source Nodes: [vlad, setitem, setitem_1, setitem_2, setitem_3, setitem_4, setitem_5, setitem_6, setitem_7, setitem_8, setitem_9, setitem_10, setitem_11, setitem_12, setitem_13, setitem_14, setitem_15, setitem_16, setitem_17, setitem_18, setitem_19, setitem_20, setitem_21, setitem_22, setitem_23, setitem_24, setitem_25, setitem_26, setitem_27, setitem_28, setitem_29, setitem_30, setitem_31, setitem_32, setitem_33, setitem_34, setitem_35, setitem_36, setitem_37, setitem_38, setitem_39, setitem_40, setitem_41, setitem_42, setitem_43, setitem_44, setitem_45, setitem_46, setitem_47, setitem_48, setitem_49, setitem_50, setitem_51, setitem_52, setitem_53, setitem_54, setitem_55, setitem_56, setitem_57, setitem_58, setitem_59, setitem_60, setitem_61, setitem_62, setitem_63, vlad_1], Original ATen: [aten.zeros, aten.copy, aten.linalg_vector_norm] # Source node to ATen node mapping: # setitem => copy # setitem_1 => copy_1 # setitem_10 => copy_10 # setitem_11 => copy_11 # setitem_12 => copy_12 # setitem_13 => copy_13 # setitem_14 => copy_14 # setitem_15 => copy_15 # setitem_16 => copy_16 # setitem_17 => copy_17 # setitem_18 => copy_18 # setitem_19 => copy_19 # setitem_2 => copy_2 # setitem_20 => copy_20 # setitem_21 => copy_21 # setitem_22 => copy_22 # setitem_23 => copy_23 # setitem_24 => copy_24 # setitem_25 => copy_25 # setitem_26 => copy_26 # setitem_27 => copy_27 # setitem_28 => copy_28 # setitem_29 => copy_29 # setitem_3 => copy_3 # setitem_30 => copy_30 # setitem_31 => copy_31 # setitem_32 => copy_32 # setitem_33 => copy_33 # setitem_34 => copy_34 # setitem_35 => copy_35 # setitem_36 => copy_36 # setitem_37 => copy_37 # setitem_38 => copy_38 # setitem_39 => copy_39 # setitem_4 => copy_4 # setitem_40 => copy_40 # setitem_41 => copy_41 # setitem_42 => copy_42 # setitem_43 => copy_43 # setitem_44 => copy_44 # setitem_45 => copy_45 # setitem_46 => copy_46 # setitem_47 => copy_47 # setitem_48 => copy_48 # setitem_49 => copy_49 # setitem_5 => copy_5 # setitem_50 => copy_50 # setitem_51 => copy_51 # setitem_52 => copy_52 # setitem_53 => copy_53 # setitem_54 => copy_54 # setitem_55 => copy_55 # setitem_56 => copy_56 # setitem_57 => copy_57 # setitem_58 => copy_58 # setitem_59 => copy_59 # setitem_6 => copy_6 # setitem_60 => copy_60 # setitem_61 => copy_61 # setitem_62 => copy_62 # setitem_63 => copy_63 # setitem_7 
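# NOTE (assumption): for orientation, a minimal eager-mode sketch of what the
# two reduction kernels above and the scatter/norm kernel below compute,
# assuming this lowers a NetVLAD-style aggregation. The helper is
# hypothetical, for reference only, and is never called by the generated code.
def _reference_vlad_aggregation(residuals, scores):
    # residuals: (B, K, D, N) precomputed (x - c_k) per cluster, here (4, 64, 128, 4096)
    # scores:    (B, K, N)    raw assignment logits per cluster/position, here (4, 64, 4096)
    soft_assign = torch.softmax(scores, dim=1)  # exp(s - max) / sum, as in the kernels above
    vlad = (residuals * soft_assign.unsqueeze(2)).sum(dim=-1)  # (B, K, D) weighted residual sums
    return torch.nn.functional.normalize(vlad, p=2.0, dim=2)  # intra-normalization (pow, sum, sqrt)
# With B = 4, K = 64, D = 128, N = 4096 this matches the (4, 64, 128) vlad
# buffer and the dim-2 L2 norm in the graph fragments below.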
=> copy_7 # setitem_8 => copy_8 # setitem_9 => copy_9 # vlad => full # vlad_1 => pow_3, pow_4, sum_67 # Graph fragment: # %full : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 64, 128], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_7, %sum_3), kwargs = {}) # %slice_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%full, %copy, 1, 0, 1), kwargs = {}) # %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_26, %sum_4), kwargs = {}) # %slice_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default, %copy_1, 1, 1, 2), kwargs = {}) # %copy_2 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_45, %sum_5), kwargs = {}) # %slice_scatter_default_2 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %copy_2, 1, 2, 3), kwargs = {}) # %copy_3 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_64, %sum_6), kwargs = {}) # %slice_scatter_default_3 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_2, %copy_3, 1, 3, 4), kwargs = {}) # %copy_4 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_83, %sum_7), kwargs = {}) # %slice_scatter_default_4 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_3, %copy_4, 1, 4, 5), kwargs = {}) # %copy_5 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_102, %sum_8), kwargs = {}) # %slice_scatter_default_5 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_4, %copy_5, 1, 5, 6), kwargs = {}) # %copy_6 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_121, %sum_9), kwargs = {}) # %slice_scatter_default_6 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_5, %copy_6, 1, 6, 7), kwargs = {}) # %copy_7 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_140, %sum_10), kwargs = {}) # %slice_scatter_default_7 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_6, %copy_7, 1, 7, 8), kwargs = {}) # %copy_8 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_159, %sum_11), kwargs = {}) # %slice_scatter_default_8 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_7, %copy_8, 1, 8, 9), kwargs = {}) # %copy_9 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_178, %sum_12), kwargs = {}) # %slice_scatter_default_9 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_8, %copy_9, 1, 9, 10), kwargs = {}) # %copy_10 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_197, %sum_13), kwargs = {}) # %slice_scatter_default_10 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_9, %copy_10, 1, 10, 11), kwargs = {}) # %copy_11 : [num_users=1] = 
call_function[target=torch.ops.aten.copy.default](args = (%slice_216, %sum_14), kwargs = {}) # %slice_scatter_default_11 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_10, %copy_11, 1, 11, 12), kwargs = {}) # %copy_12 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_235, %sum_15), kwargs = {}) # %slice_scatter_default_12 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_11, %copy_12, 1, 12, 13), kwargs = {}) # %copy_13 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_254, %sum_16), kwargs = {}) # %slice_scatter_default_13 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_12, %copy_13, 1, 13, 14), kwargs = {}) # %copy_14 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_273, %sum_17), kwargs = {}) # %slice_scatter_default_14 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_13, %copy_14, 1, 14, 15), kwargs = {}) # %copy_15 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_292, %sum_18), kwargs = {}) # %slice_scatter_default_15 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_14, %copy_15, 1, 15, 16), kwargs = {}) # %copy_16 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_311, %sum_19), kwargs = {}) # %slice_scatter_default_16 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_15, %copy_16, 1, 16, 17), kwargs = {}) # %copy_17 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_330, %sum_20), kwargs = {}) # %slice_scatter_default_17 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_16, %copy_17, 1, 17, 18), kwargs = {}) # %copy_18 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_349, %sum_21), kwargs = {}) # %slice_scatter_default_18 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_17, %copy_18, 1, 18, 19), kwargs = {}) # %copy_19 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_368, %sum_22), kwargs = {}) # %slice_scatter_default_19 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_18, %copy_19, 1, 19, 20), kwargs = {}) # %copy_20 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_387, %sum_23), kwargs = {}) # %slice_scatter_default_20 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_19, %copy_20, 1, 20, 21), kwargs = {}) # %copy_21 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_406, %sum_24), kwargs = {}) # %slice_scatter_default_21 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_20, %copy_21, 1, 21, 22), kwargs = {}) # %copy_22 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_425, %sum_25), kwargs = {}) # %slice_scatter_default_22 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_21, %copy_22, 1, 22, 23), kwargs = {}) # %copy_23 : 
[num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_444, %sum_26), kwargs = {}) # %slice_scatter_default_23 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_22, %copy_23, 1, 23, 24), kwargs = {}) # %copy_24 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_463, %sum_27), kwargs = {}) # %slice_scatter_default_24 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_23, %copy_24, 1, 24, 25), kwargs = {}) # %copy_25 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_482, %sum_28), kwargs = {}) # %slice_scatter_default_25 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_24, %copy_25, 1, 25, 26), kwargs = {}) # %copy_26 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_501, %sum_29), kwargs = {}) # %slice_scatter_default_26 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_25, %copy_26, 1, 26, 27), kwargs = {}) # %copy_27 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_520, %sum_30), kwargs = {}) # %slice_scatter_default_27 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_26, %copy_27, 1, 27, 28), kwargs = {}) # %copy_28 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_539, %sum_31), kwargs = {}) # %slice_scatter_default_28 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_27, %copy_28, 1, 28, 29), kwargs = {}) # %copy_29 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_558, %sum_32), kwargs = {}) # %slice_scatter_default_29 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_28, %copy_29, 1, 29, 30), kwargs = {}) # %copy_30 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_577, %sum_33), kwargs = {}) # %slice_scatter_default_30 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_29, %copy_30, 1, 30, 31), kwargs = {}) # %copy_31 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_596, %sum_34), kwargs = {}) # %slice_scatter_default_31 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_30, %copy_31, 1, 31, 32), kwargs = {}) # %copy_32 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_615, %sum_35), kwargs = {}) # %slice_scatter_default_32 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_31, %copy_32, 1, 32, 33), kwargs = {}) # %copy_33 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_634, %sum_36), kwargs = {}) # %slice_scatter_default_33 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_32, %copy_33, 1, 33, 34), kwargs = {}) # %copy_34 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_653, %sum_37), kwargs = {}) # %slice_scatter_default_34 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_33, %copy_34, 1, 34, 35), kwargs = {}) # 
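# NOTE (assumption): this slice_scatter chain is the functionalized form of
# the in-place writes vlad[:, k : k + 1, :] = sum_k for k = 0..63; each
# intermediate appears with num_users=2 because it feeds both the next slice
# read and the next scatter.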
%copy_35 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_672, %sum_38), kwargs = {}) # %slice_scatter_default_35 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_34, %copy_35, 1, 35, 36), kwargs = {}) # %copy_36 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_691, %sum_39), kwargs = {}) # %slice_scatter_default_36 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_35, %copy_36, 1, 36, 37), kwargs = {}) # %copy_37 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_710, %sum_40), kwargs = {}) # %slice_scatter_default_37 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_36, %copy_37, 1, 37, 38), kwargs = {}) # %copy_38 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_729, %sum_41), kwargs = {}) # %slice_scatter_default_38 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_37, %copy_38, 1, 38, 39), kwargs = {}) # %copy_39 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_748, %sum_42), kwargs = {}) # %slice_scatter_default_39 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_38, %copy_39, 1, 39, 40), kwargs = {}) # %copy_40 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_767, %sum_43), kwargs = {}) # %slice_scatter_default_40 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_39, %copy_40, 1, 40, 41), kwargs = {}) # %copy_41 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_786, %sum_44), kwargs = {}) # %slice_scatter_default_41 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_40, %copy_41, 1, 41, 42), kwargs = {}) # %copy_42 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_805, %sum_45), kwargs = {}) # %slice_scatter_default_42 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_41, %copy_42, 1, 42, 43), kwargs = {}) # %copy_43 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_824, %sum_46), kwargs = {}) # %slice_scatter_default_43 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_42, %copy_43, 1, 43, 44), kwargs = {}) # %copy_44 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_843, %sum_47), kwargs = {}) # %slice_scatter_default_44 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_43, %copy_44, 1, 44, 45), kwargs = {}) # %copy_45 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_862, %sum_48), kwargs = {}) # %slice_scatter_default_45 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_44, %copy_45, 1, 45, 46), kwargs = {}) # %copy_46 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_881, %sum_49), kwargs = {}) # %slice_scatter_default_46 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_45, %copy_46, 1, 46, 47), 
kwargs = {}) # %copy_47 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_900, %sum_50), kwargs = {}) # %slice_scatter_default_47 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_46, %copy_47, 1, 47, 48), kwargs = {}) # %copy_48 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_919, %sum_51), kwargs = {}) # %slice_scatter_default_48 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_47, %copy_48, 1, 48, 49), kwargs = {}) # %copy_49 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_938, %sum_52), kwargs = {}) # %slice_scatter_default_49 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_48, %copy_49, 1, 49, 50), kwargs = {}) # %copy_50 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_957, %sum_53), kwargs = {}) # %slice_scatter_default_50 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_49, %copy_50, 1, 50, 51), kwargs = {}) # %copy_51 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_976, %sum_54), kwargs = {}) # %slice_scatter_default_51 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_50, %copy_51, 1, 51, 52), kwargs = {}) # %copy_52 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_995, %sum_55), kwargs = {}) # %slice_scatter_default_52 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_51, %copy_52, 1, 52, 53), kwargs = {}) # %copy_53 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1014, %sum_56), kwargs = {}) # %slice_scatter_default_53 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_52, %copy_53, 1, 53, 54), kwargs = {}) # %copy_54 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1033, %sum_57), kwargs = {}) # %slice_scatter_default_54 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_53, %copy_54, 1, 54, 55), kwargs = {}) # %copy_55 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1052, %sum_58), kwargs = {}) # %slice_scatter_default_55 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_54, %copy_55, 1, 55, 56), kwargs = {}) # %copy_56 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1071, %sum_59), kwargs = {}) # %slice_scatter_default_56 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_55, %copy_56, 1, 56, 57), kwargs = {}) # %copy_57 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1090, %sum_60), kwargs = {}) # %slice_scatter_default_57 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_56, %copy_57, 1, 57, 58), kwargs = {}) # %copy_58 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1109, %sum_61), kwargs = {}) # %slice_scatter_default_58 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_57, 
%copy_58, 1, 58, 59), kwargs = {}) # %copy_59 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1128, %sum_62), kwargs = {}) # %slice_scatter_default_59 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_58, %copy_59, 1, 59, 60), kwargs = {}) # %copy_60 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1147, %sum_63), kwargs = {}) # %slice_scatter_default_60 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_59, %copy_60, 1, 60, 61), kwargs = {}) # %copy_61 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1166, %sum_64), kwargs = {}) # %slice_scatter_default_61 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_60, %copy_61, 1, 61, 62), kwargs = {}) # %copy_62 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1185, %sum_65), kwargs = {}) # %slice_scatter_default_62 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_61, %copy_62, 1, 62, 63), kwargs = {}) # %copy_63 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1204, %sum_66), kwargs = {}) # %slice_scatter_default_63 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_62, %copy_63, 1, 63, 64), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%slice_scatter_default_63, 2), kwargs = {}) # %sum_67 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [2], True), kwargs = {}) # %pow_4 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_67, 0.5), kwargs = {}) triton_per_fused_copy_linalg_vector_norm_zeros_6 = async_compile.triton('triton_per_fused_copy_linalg_vector_norm_zeros_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[256, 128], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: '*fp32', 34: '*fp32', 35: '*fp32', 36: '*fp32', 37: '*fp32', 38: '*fp32', 39: '*fp32', 40: '*fp32', 41: '*fp32', 42: '*fp32', 43: '*fp32', 44: '*fp32', 45: '*fp32', 46: '*fp32', 47: '*fp32', 48: '*fp32', 49: '*fp32', 50: '*fp32', 51: '*fp32', 52: '*fp32', 53: '*fp32', 54: '*fp32', 55: '*fp32', 56: '*fp32', 57: '*fp32', 58: '*fp32', 59: '*fp32', 60: '*fp32', 61: '*fp32', 62: '*fp32', 63: '*fp32', 64: '*fp32', 65: '*fp32', 66: 'i32', 67: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
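# NOTE (assumption): persistent reduction, xnumel = 256 is 4 batches x 64
# clusters and rnumel = 128 dims held entirely in registers (RBLOCK = 128).
# The kernel fuses the 64 scatter copies with the intra-normalization norm:
# the tl.where chains below select, per row (x1 = batch, x0 = cluster), the
# matching per-cluster sum buffer, after which the squared values are reduced
# over the 128 dims for the L2 norm (pow(., 2), sum over dim 2, sqrt).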
multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_copy_linalg_vector_norm_zeros_6', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 64, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_copy_linalg_vector_norm_zeros_6(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, in_ptr32, in_ptr33, in_ptr34, in_ptr35, in_ptr36, in_ptr37, in_ptr38, in_ptr39, in_ptr40, in_ptr41, in_ptr42, in_ptr43, in_ptr44, in_ptr45, in_ptr46, in_ptr47, in_ptr48, in_ptr49, in_ptr50, in_ptr51, in_ptr52, in_ptr53, in_ptr54, in_ptr55, in_ptr56, in_ptr57, in_ptr58, in_ptr59, in_ptr60, in_ptr61, in_ptr62, in_ptr63, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 256 rnumel = 128 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex % 64 r2 = rindex x1 = (xindex // 64) x3 = xindex tmp0 = x0 tmp1 = tl.full([1, 1], 4, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1, 1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (r2 + (128*x1)), tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tl.full([1, 1], 3, tl.int64) tmp8 = tmp0 >= tmp7 tmp9 = tmp0 < tmp1 tmp10 = tmp8 & tmp9 tmp11 = tl.load(in_ptr1 + (r2 + (128*x1)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.full([1, 1], 2, tl.int64) tmp13 = tmp0 >= tmp12 tmp14 = tmp0 < tmp7 tmp15 = tmp13 & tmp14 tmp16 = tl.load(in_ptr2 + (r2 + (128*x1)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tl.full([1, 1], 1, tl.int64) tmp18 = tmp0 >= tmp17 tmp19 = tmp0 < tmp12 tmp20 = tmp18 & tmp19 tmp21 = tl.load(in_ptr3 + (r2 + (128*x1)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tmp0 < tmp17 tmp23 = tl.load(in_ptr4 + (r2 + (128*x1)), tmp22 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = 0.0 tmp25 = tl.where(tmp22, tmp23, tmp24) tmp26 = tl.where(tmp20, tmp21, tmp25) tmp27 = tl.where(tmp15, tmp16, tmp26) tmp28 = tl.where(tmp10, tmp11, tmp27) tmp29 = tl.where(tmp5, tmp6, tmp28) tmp30 = tl.full([1, 1], 8, tl.int64) tmp31 = tmp0 >= tmp30 tmp32 = tl.full([1, 1], 9, tl.int64) tmp33 = tmp0 < tmp32 tmp34 = tmp31 & tmp33 tmp35 = tl.load(in_ptr5 + (r2 + (128*x1)), tmp34 & xmask, eviction_policy='evict_last', other=0.0) tmp36 = tl.full([1, 1], 7, tl.int64) tmp37 = 
tmp0 >= tmp36 tmp38 = tmp0 < tmp30 tmp39 = tmp37 & tmp38 tmp40 = tl.load(in_ptr6 + (r2 + (128*x1)), tmp39 & xmask, eviction_policy='evict_last', other=0.0) tmp41 = tl.full([1, 1], 6, tl.int64) tmp42 = tmp0 >= tmp41 tmp43 = tmp0 < tmp36 tmp44 = tmp42 & tmp43 tmp45 = tl.load(in_ptr7 + (r2 + (128*x1)), tmp44 & xmask, eviction_policy='evict_last', other=0.0) tmp46 = tmp0 >= tmp3 tmp47 = tmp0 < tmp41 tmp48 = tmp46 & tmp47 tmp49 = tl.load(in_ptr8 + (r2 + (128*x1)), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.where(tmp48, tmp49, tmp29) tmp51 = tl.where(tmp44, tmp45, tmp50) tmp52 = tl.where(tmp39, tmp40, tmp51) tmp53 = tl.where(tmp34, tmp35, tmp52) tmp54 = tl.full([1, 1], 12, tl.int64) tmp55 = tmp0 >= tmp54 tmp56 = tl.full([1, 1], 13, tl.int64) tmp57 = tmp0 < tmp56 tmp58 = tmp55 & tmp57 tmp59 = tl.load(in_ptr9 + (r2 + (128*x1)), tmp58 & xmask, eviction_policy='evict_last', other=0.0) tmp60 = tl.full([1, 1], 11, tl.int64) tmp61 = tmp0 >= tmp60 tmp62 = tmp0 < tmp54 tmp63 = tmp61 & tmp62 tmp64 = tl.load(in_ptr10 + (r2 + (128*x1)), tmp63 & xmask, eviction_policy='evict_last', other=0.0) tmp65 = tl.full([1, 1], 10, tl.int64) tmp66 = tmp0 >= tmp65 tmp67 = tmp0 < tmp60 tmp68 = tmp66 & tmp67 tmp69 = tl.load(in_ptr11 + (r2 + (128*x1)), tmp68 & xmask, eviction_policy='evict_last', other=0.0) tmp70 = tmp0 >= tmp32 tmp71 = tmp0 < tmp65 tmp72 = tmp70 & tmp71 tmp73 = tl.load(in_ptr12 + (r2 + (128*x1)), tmp72 & xmask, eviction_policy='evict_last', other=0.0) tmp74 = tl.where(tmp72, tmp73, tmp53) tmp75 = tl.where(tmp68, tmp69, tmp74) tmp76 = tl.where(tmp63, tmp64, tmp75) tmp77 = tl.where(tmp58, tmp59, tmp76) tmp78 = tl.full([1, 1], 16, tl.int64) tmp79 = tmp0 >= tmp78 tmp80 = tl.full([1, 1], 17, tl.int64) tmp81 = tmp0 < tmp80 tmp82 = tmp79 & tmp81 tmp83 = tl.load(in_ptr13 + (r2 + (128*x1)), tmp82 & xmask, eviction_policy='evict_last', other=0.0) tmp84 = tl.full([1, 1], 15, tl.int64) tmp85 = tmp0 >= tmp84 tmp86 = tmp0 < tmp78 tmp87 = tmp85 & tmp86 tmp88 = tl.load(in_ptr14 + (r2 + (128*x1)), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full([1, 1], 14, tl.int64) tmp90 = tmp0 >= tmp89 tmp91 = tmp0 < tmp84 tmp92 = tmp90 & tmp91 tmp93 = tl.load(in_ptr15 + (r2 + (128*x1)), tmp92 & xmask, eviction_policy='evict_last', other=0.0) tmp94 = tmp0 >= tmp56 tmp95 = tmp0 < tmp89 tmp96 = tmp94 & tmp95 tmp97 = tl.load(in_ptr16 + (r2 + (128*x1)), tmp96 & xmask, eviction_policy='evict_last', other=0.0) tmp98 = tl.where(tmp96, tmp97, tmp77) tmp99 = tl.where(tmp92, tmp93, tmp98) tmp100 = tl.where(tmp87, tmp88, tmp99) tmp101 = tl.where(tmp82, tmp83, tmp100) tmp102 = tl.full([1, 1], 20, tl.int64) tmp103 = tmp0 >= tmp102 tmp104 = tl.full([1, 1], 21, tl.int64) tmp105 = tmp0 < tmp104 tmp106 = tmp103 & tmp105 tmp107 = tl.load(in_ptr17 + (r2 + (128*x1)), tmp106 & xmask, eviction_policy='evict_last', other=0.0) tmp108 = tl.full([1, 1], 19, tl.int64) tmp109 = tmp0 >= tmp108 tmp110 = tmp0 < tmp102 tmp111 = tmp109 & tmp110 tmp112 = tl.load(in_ptr18 + (r2 + (128*x1)), tmp111 & xmask, eviction_policy='evict_last', other=0.0) tmp113 = tl.full([1, 1], 18, tl.int64) tmp114 = tmp0 >= tmp113 tmp115 = tmp0 < tmp108 tmp116 = tmp114 & tmp115 tmp117 = tl.load(in_ptr19 + (r2 + (128*x1)), tmp116 & xmask, eviction_policy='evict_last', other=0.0) tmp118 = tmp0 >= tmp80 tmp119 = tmp0 < tmp113 tmp120 = tmp118 & tmp119 tmp121 = tl.load(in_ptr20 + (r2 + (128*x1)), tmp120 & xmask, eviction_policy='evict_last', other=0.0) tmp122 = tl.where(tmp120, tmp121, tmp101) tmp123 = tl.where(tmp116, tmp117, tmp122) tmp124 = 
tl.where(tmp111, tmp112, tmp123) tmp125 = tl.where(tmp106, tmp107, tmp124) tmp126 = tl.full([1, 1], 24, tl.int64) tmp127 = tmp0 >= tmp126 tmp128 = tl.full([1, 1], 25, tl.int64) tmp129 = tmp0 < tmp128 tmp130 = tmp127 & tmp129 tmp131 = tl.load(in_ptr21 + (r2 + (128*x1)), tmp130 & xmask, eviction_policy='evict_last', other=0.0) tmp132 = tl.full([1, 1], 23, tl.int64) tmp133 = tmp0 >= tmp132 tmp134 = tmp0 < tmp126 tmp135 = tmp133 & tmp134 tmp136 = tl.load(in_ptr22 + (r2 + (128*x1)), tmp135 & xmask, eviction_policy='evict_last', other=0.0) tmp137 = tl.full([1, 1], 22, tl.int64) tmp138 = tmp0 >= tmp137 tmp139 = tmp0 < tmp132 tmp140 = tmp138 & tmp139 tmp141 = tl.load(in_ptr23 + (r2 + (128*x1)), tmp140 & xmask, eviction_policy='evict_last', other=0.0) tmp142 = tmp0 >= tmp104 tmp143 = tmp0 < tmp137 tmp144 = tmp142 & tmp143 tmp145 = tl.load(in_ptr24 + (r2 + (128*x1)), tmp144 & xmask, eviction_policy='evict_last', other=0.0) tmp146 = tl.where(tmp144, tmp145, tmp125) tmp147 = tl.where(tmp140, tmp141, tmp146) tmp148 = tl.where(tmp135, tmp136, tmp147) tmp149 = tl.where(tmp130, tmp131, tmp148) tmp150 = tl.full([1, 1], 28, tl.int64) tmp151 = tmp0 >= tmp150 tmp152 = tl.full([1, 1], 29, tl.int64) tmp153 = tmp0 < tmp152 tmp154 = tmp151 & tmp153 tmp155 = tl.load(in_ptr25 + (r2 + (128*x1)), tmp154 & xmask, eviction_policy='evict_last', other=0.0) tmp156 = tl.full([1, 1], 27, tl.int64) tmp157 = tmp0 >= tmp156 tmp158 = tmp0 < tmp150 tmp159 = tmp157 & tmp158 tmp160 = tl.load(in_ptr26 + (r2 + (128*x1)), tmp159 & xmask, eviction_policy='evict_last', other=0.0) tmp161 = tl.full([1, 1], 26, tl.int64) tmp162 = tmp0 >= tmp161 tmp163 = tmp0 < tmp156 tmp164 = tmp162 & tmp163 tmp165 = tl.load(in_ptr27 + (r2 + (128*x1)), tmp164 & xmask, eviction_policy='evict_last', other=0.0) tmp166 = tmp0 >= tmp128 tmp167 = tmp0 < tmp161 tmp168 = tmp166 & tmp167 tmp169 = tl.load(in_ptr28 + (r2 + (128*x1)), tmp168 & xmask, eviction_policy='evict_last', other=0.0) tmp170 = tl.where(tmp168, tmp169, tmp149) tmp171 = tl.where(tmp164, tmp165, tmp170) tmp172 = tl.where(tmp159, tmp160, tmp171) tmp173 = tl.where(tmp154, tmp155, tmp172) tmp174 = tl.full([1, 1], 32, tl.int64) tmp175 = tmp0 >= tmp174 tmp176 = tl.full([1, 1], 33, tl.int64) tmp177 = tmp0 < tmp176 tmp178 = tmp175 & tmp177 tmp179 = tl.load(in_ptr29 + (r2 + (128*x1)), tmp178 & xmask, eviction_policy='evict_last', other=0.0) tmp180 = tl.full([1, 1], 31, tl.int64) tmp181 = tmp0 >= tmp180 tmp182 = tmp0 < tmp174 tmp183 = tmp181 & tmp182 tmp184 = tl.load(in_ptr30 + (r2 + (128*x1)), tmp183 & xmask, eviction_policy='evict_last', other=0.0) tmp185 = tl.full([1, 1], 30, tl.int64) tmp186 = tmp0 >= tmp185 tmp187 = tmp0 < tmp180 tmp188 = tmp186 & tmp187 tmp189 = tl.load(in_ptr31 + (r2 + (128*x1)), tmp188 & xmask, eviction_policy='evict_last', other=0.0) tmp190 = tmp0 >= tmp152 tmp191 = tmp0 < tmp185 tmp192 = tmp190 & tmp191 tmp193 = tl.load(in_ptr32 + (r2 + (128*x1)), tmp192 & xmask, eviction_policy='evict_last', other=0.0) tmp194 = tl.where(tmp192, tmp193, tmp173) tmp195 = tl.where(tmp188, tmp189, tmp194) tmp196 = tl.where(tmp183, tmp184, tmp195) tmp197 = tl.where(tmp178, tmp179, tmp196) tmp198 = tl.full([1, 1], 36, tl.int64) tmp199 = tmp0 >= tmp198 tmp200 = tl.full([1, 1], 37, tl.int64) tmp201 = tmp0 < tmp200 tmp202 = tmp199 & tmp201 tmp203 = tl.load(in_ptr33 + (r2 + (128*x1)), tmp202 & xmask, eviction_policy='evict_last', other=0.0) tmp204 = tl.full([1, 1], 35, tl.int64) tmp205 = tmp0 >= tmp204 tmp206 = tmp0 < tmp198 tmp207 = tmp205 & tmp206 tmp208 = tl.load(in_ptr34 + (r2 + (128*x1)), tmp207 & 
xmask, eviction_policy='evict_last', other=0.0) tmp209 = tl.full([1, 1], 34, tl.int64) tmp210 = tmp0 >= tmp209 tmp211 = tmp0 < tmp204 tmp212 = tmp210 & tmp211 tmp213 = tl.load(in_ptr35 + (r2 + (128*x1)), tmp212 & xmask, eviction_policy='evict_last', other=0.0) tmp214 = tmp0 >= tmp176 tmp215 = tmp0 < tmp209 tmp216 = tmp214 & tmp215 tmp217 = tl.load(in_ptr36 + (r2 + (128*x1)), tmp216 & xmask, eviction_policy='evict_last', other=0.0) tmp218 = tl.where(tmp216, tmp217, tmp197) tmp219 = tl.where(tmp212, tmp213, tmp218) tmp220 = tl.where(tmp207, tmp208, tmp219) tmp221 = tl.where(tmp202, tmp203, tmp220) tmp222 = tl.full([1, 1], 40, tl.int64) tmp223 = tmp0 >= tmp222 tmp224 = tl.full([1, 1], 41, tl.int64) tmp225 = tmp0 < tmp224 tmp226 = tmp223 & tmp225 tmp227 = tl.load(in_ptr37 + (r2 + (128*x1)), tmp226 & xmask, eviction_policy='evict_last', other=0.0) tmp228 = tl.full([1, 1], 39, tl.int64) tmp229 = tmp0 >= tmp228 tmp230 = tmp0 < tmp222 tmp231 = tmp229 & tmp230 tmp232 = tl.load(in_ptr38 + (r2 + (128*x1)), tmp231 & xmask, eviction_policy='evict_last', other=0.0) tmp233 = tl.full([1, 1], 38, tl.int64) tmp234 = tmp0 >= tmp233 tmp235 = tmp0 < tmp228 tmp236 = tmp234 & tmp235 tmp237 = tl.load(in_ptr39 + (r2 + (128*x1)), tmp236 & xmask, eviction_policy='evict_last', other=0.0) tmp238 = tmp0 >= tmp200 tmp239 = tmp0 < tmp233 tmp240 = tmp238 & tmp239 tmp241 = tl.load(in_ptr40 + (r2 + (128*x1)), tmp240 & xmask, eviction_policy='evict_last', other=0.0) tmp242 = tl.where(tmp240, tmp241, tmp221) tmp243 = tl.where(tmp236, tmp237, tmp242) tmp244 = tl.where(tmp231, tmp232, tmp243) tmp245 = tl.where(tmp226, tmp227, tmp244) tmp246 = tl.full([1, 1], 44, tl.int64) tmp247 = tmp0 >= tmp246 tmp248 = tl.full([1, 1], 45, tl.int64) tmp249 = tmp0 < tmp248 tmp250 = tmp247 & tmp249 tmp251 = tl.load(in_ptr41 + (r2 + (128*x1)), tmp250 & xmask, eviction_policy='evict_last', other=0.0) tmp252 = tl.full([1, 1], 43, tl.int64) tmp253 = tmp0 >= tmp252 tmp254 = tmp0 < tmp246 tmp255 = tmp253 & tmp254 tmp256 = tl.load(in_ptr42 + (r2 + (128*x1)), tmp255 & xmask, eviction_policy='evict_last', other=0.0) tmp257 = tl.full([1, 1], 42, tl.int64) tmp258 = tmp0 >= tmp257 tmp259 = tmp0 < tmp252 tmp260 = tmp258 & tmp259 tmp261 = tl.load(in_ptr43 + (r2 + (128*x1)), tmp260 & xmask, eviction_policy='evict_last', other=0.0) tmp262 = tmp0 >= tmp224 tmp263 = tmp0 < tmp257 tmp264 = tmp262 & tmp263 tmp265 = tl.load(in_ptr44 + (r2 + (128*x1)), tmp264 & xmask, eviction_policy='evict_last', other=0.0) tmp266 = tl.where(tmp264, tmp265, tmp245) tmp267 = tl.where(tmp260, tmp261, tmp266) tmp268 = tl.where(tmp255, tmp256, tmp267) tmp269 = tl.where(tmp250, tmp251, tmp268) tmp270 = tl.full([1, 1], 48, tl.int64) tmp271 = tmp0 >= tmp270 tmp272 = tl.full([1, 1], 49, tl.int64) tmp273 = tmp0 < tmp272 tmp274 = tmp271 & tmp273 tmp275 = tl.load(in_ptr45 + (r2 + (128*x1)), tmp274 & xmask, eviction_policy='evict_last', other=0.0) tmp276 = tl.full([1, 1], 47, tl.int64) tmp277 = tmp0 >= tmp276 tmp278 = tmp0 < tmp270 tmp279 = tmp277 & tmp278 tmp280 = tl.load(in_ptr46 + (r2 + (128*x1)), tmp279 & xmask, eviction_policy='evict_last', other=0.0) tmp281 = tl.full([1, 1], 46, tl.int64) tmp282 = tmp0 >= tmp281 tmp283 = tmp0 < tmp276 tmp284 = tmp282 & tmp283 tmp285 = tl.load(in_ptr47 + (r2 + (128*x1)), tmp284 & xmask, eviction_policy='evict_last', other=0.0) tmp286 = tmp0 >= tmp248 tmp287 = tmp0 < tmp281 tmp288 = tmp286 & tmp287 tmp289 = tl.load(in_ptr48 + (r2 + (128*x1)), tmp288 & xmask, eviction_policy='evict_last', other=0.0) tmp290 = tl.where(tmp288, tmp289, tmp269) tmp291 = 
tl.where(tmp284, tmp285, tmp290) tmp292 = tl.where(tmp279, tmp280, tmp291) tmp293 = tl.where(tmp274, tmp275, tmp292) tmp294 = tl.full([1, 1], 52, tl.int64) tmp295 = tmp0 >= tmp294 tmp296 = tl.full([1, 1], 53, tl.int64) tmp297 = tmp0 < tmp296 tmp298 = tmp295 & tmp297 tmp299 = tl.load(in_ptr49 + (r2 + (128*x1)), tmp298 & xmask, eviction_policy='evict_last', other=0.0) tmp300 = tl.full([1, 1], 51, tl.int64) tmp301 = tmp0 >= tmp300 tmp302 = tmp0 < tmp294 tmp303 = tmp301 & tmp302 tmp304 = tl.load(in_ptr50 + (r2 + (128*x1)), tmp303 & xmask, eviction_policy='evict_last', other=0.0) tmp305 = tl.full([1, 1], 50, tl.int64) tmp306 = tmp0 >= tmp305 tmp307 = tmp0 < tmp300 tmp308 = tmp306 & tmp307 tmp309 = tl.load(in_ptr51 + (r2 + (128*x1)), tmp308 & xmask, eviction_policy='evict_last', other=0.0) tmp310 = tmp0 >= tmp272 tmp311 = tmp0 < tmp305 tmp312 = tmp310 & tmp311 tmp313 = tl.load(in_ptr52 + (r2 + (128*x1)), tmp312 & xmask, eviction_policy='evict_last', other=0.0) tmp314 = tl.where(tmp312, tmp313, tmp293) tmp315 = tl.where(tmp308, tmp309, tmp314) tmp316 = tl.where(tmp303, tmp304, tmp315) tmp317 = tl.where(tmp298, tmp299, tmp316) tmp318 = tl.full([1, 1], 56, tl.int64) tmp319 = tmp0 >= tmp318 tmp320 = tl.full([1, 1], 57, tl.int64) tmp321 = tmp0 < tmp320 tmp322 = tmp319 & tmp321 tmp323 = tl.load(in_ptr53 + (r2 + (128*x1)), tmp322 & xmask, eviction_policy='evict_last', other=0.0) tmp324 = tl.full([1, 1], 55, tl.int64) tmp325 = tmp0 >= tmp324 tmp326 = tmp0 < tmp318 tmp327 = tmp325 & tmp326 tmp328 = tl.load(in_ptr54 + (r2 + (128*x1)), tmp327 & xmask, eviction_policy='evict_last', other=0.0) tmp329 = tl.full([1, 1], 54, tl.int64) tmp330 = tmp0 >= tmp329 tmp331 = tmp0 < tmp324 tmp332 = tmp330 & tmp331 tmp333 = tl.load(in_ptr55 + (r2 + (128*x1)), tmp332 & xmask, eviction_policy='evict_last', other=0.0) tmp334 = tmp0 >= tmp296 tmp335 = tmp0 < tmp329 tmp336 = tmp334 & tmp335 tmp337 = tl.load(in_ptr56 + (r2 + (128*x1)), tmp336 & xmask, eviction_policy='evict_last', other=0.0) tmp338 = tl.where(tmp336, tmp337, tmp317) tmp339 = tl.where(tmp332, tmp333, tmp338) tmp340 = tl.where(tmp327, tmp328, tmp339) tmp341 = tl.where(tmp322, tmp323, tmp340) tmp342 = tl.full([1, 1], 60, tl.int64) tmp343 = tmp0 >= tmp342 tmp344 = tl.full([1, 1], 61, tl.int64) tmp345 = tmp0 < tmp344 tmp346 = tmp343 & tmp345 tmp347 = tl.load(in_ptr57 + (r2 + (128*x1)), tmp346 & xmask, eviction_policy='evict_last', other=0.0) tmp348 = tl.full([1, 1], 59, tl.int64) tmp349 = tmp0 >= tmp348 tmp350 = tmp0 < tmp342 tmp351 = tmp349 & tmp350 tmp352 = tl.load(in_ptr58 + (r2 + (128*x1)), tmp351 & xmask, eviction_policy='evict_last', other=0.0) tmp353 = tl.full([1, 1], 58, tl.int64) tmp354 = tmp0 >= tmp353 tmp355 = tmp0 < tmp348 tmp356 = tmp354 & tmp355 tmp357 = tl.load(in_ptr59 + (r2 + (128*x1)), tmp356 & xmask, eviction_policy='evict_last', other=0.0) tmp358 = tmp0 >= tmp320 tmp359 = tmp0 < tmp353 tmp360 = tmp358 & tmp359 tmp361 = tl.load(in_ptr60 + (r2 + (128*x1)), tmp360 & xmask, eviction_policy='evict_last', other=0.0) tmp362 = tl.where(tmp360, tmp361, tmp341) tmp363 = tl.where(tmp356, tmp357, tmp362) tmp364 = tl.where(tmp351, tmp352, tmp363) tmp365 = tl.where(tmp346, tmp347, tmp364) tmp366 = tl.full([1, 1], 63, tl.int64) tmp367 = tmp0 >= tmp366 tmp368 = tl.load(in_ptr61 + (r2 + (128*x1)), tmp367 & xmask, eviction_policy='evict_last', other=0.0) tmp369 = tl.full([1, 1], 62, tl.int64) tmp370 = tmp0 >= tmp369 tmp371 = tmp0 < tmp366 tmp372 = tmp370 & tmp371 tmp373 = tl.load(in_ptr62 + (r2 + (128*x1)), tmp372 & xmask, eviction_policy='evict_last', other=0.0) 
    tmp374 = tmp0 >= tmp344
    tmp375 = tmp0 < tmp369
    tmp376 = tmp374 & tmp375
    tmp377 = tl.load(in_ptr63 + (r2 + (128*x1)), tmp376 & xmask, eviction_policy='evict_last', other=0.0)
    tmp378 = tl.where(tmp376, tmp377, tmp365)
    tmp379 = tl.where(tmp372, tmp373, tmp378)
    tmp380 = tl.where(tmp367, tmp368, tmp379)
    tmp381 = tmp380 * tmp380
    tmp382 = tl.broadcast_to(tmp381, [XBLOCK, RBLOCK])
    tmp384 = tl.where(xmask, tmp382, 0)
    tmp385 = tl.sum(tmp384, 1)[:, None]
    tmp386 = libdevice.sqrt(tmp385)
    tl.store(in_out_ptr0 + (r2 + (128*x3)), tmp380, xmask)
    tl.debug_barrier()
    tl.store(in_out_ptr1 + (x3), tmp386, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/xt/cxtqnduhok2v4j5onaq63427jwrj6hwwv7ld23vqmaek6fsjbf2b.py
# Topologically Sorted Source Nodes: [vlad_3], Original ATen: [aten.linalg_vector_norm, aten.div]
# Source node to ATen node mapping:
#   vlad_3 => div_3, pow_5, pow_6, sum_68
# Graph fragment:
#   %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_2, 2), kwargs = {})
#   %sum_68 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [1], True), kwargs = {})
#   %pow_6 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_68, 0.5), kwargs = {})
#   %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, %expand_66), kwargs = {})
triton_red_fused_div_linalg_vector_norm_7 = async_compile.triton('triton_red_fused_div_linalg_vector_norm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.reduction(
    size_hints=[4, 8192],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_div_linalg_vector_norm_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_div_linalg_vector_norm_7(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
    xnumel = 4
    rnumel = 8192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex
    _tmp7 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp0 = tl.load(in_ptr0 + (r1 + (8192*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp1 = tl.load(in_ptr1 + ((64*x0) + (r1 // 128)), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp2 = 1e-12
        tmp3 = triton_helpers.maximum(tmp1, tmp2)
        tmp4 = tmp0 / tmp3
        tmp5 = tmp4 * tmp4
        tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
        tmp8 = _tmp7 + tmp6
        _tmp7 = tl.where(rmask & xmask, tmp8, _tmp7)
    tmp7 = tl.sum(_tmp7, 1)[:, None]
    tmp9 = libdevice.sqrt(tmp7)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (x0), tmp9, xmask)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp10 = tl.load(in_ptr0 + (r1 + (8192*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp11 = tl.load(in_ptr1 + ((64*x0) + (r1 // 128)), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp12 = 1e-12
        tmp13 = triton_helpers.maximum(tmp11, tmp12)
        tmp14 = tmp10 / tmp13
        tmp15 = triton_helpers.maximum(tmp9, tmp12)
        tmp16 = tmp14 / tmp15
        tl.store(out_ptr0 + (r1 + (8192*x0)), tmp16, rmask & xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 128, 64, 64), (524288, 4096, 64, 1))
    assert_size_stride(primals_2, (64, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_3, (64, 128), (128, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.linalg_vector_norm]
        stream0 = get_raw_stream(0)
        triton_red_fused_linalg_vector_norm_0.run(primals_1, buf0, 16384, 128, grid=grid(16384), stream=stream0)
        buf1 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32)
        buf7 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf9 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf11 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf13 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf16 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf18 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf20 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf22 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf25 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf27 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf29 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf31 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf34 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf36 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf38 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf40 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf43 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf45 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf47 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf49 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf52 =
empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf54 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf56 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf58 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf61 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf63 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf65 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf67 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf70 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf72 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf74 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf76 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf79 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf81 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf83 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf85 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf88 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf90 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf92 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf94 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf97 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf99 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf101 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf103 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf106 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf108 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf110 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf112 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf115 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf117 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf119 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf121 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf124 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf126 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf128 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf130 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf133 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf135 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf137 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf139 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), 
torch.float32) buf142 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf144 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf146 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) # Topologically Sorted Source Nodes: [x, residual_2, residual_4, residual_6, residual_8, residual_10, residual_12, residual_14, residual_16, residual_18, residual_20, residual_22, residual_24, residual_26, residual_28, residual_30, residual_32, residual_34, residual_36, residual_38, residual_40, residual_42, residual_44, residual_46, residual_48, residual_50, residual_52, residual_54, residual_56, residual_58, residual_60, residual_62, residual_64, residual_66, residual_68, residual_70, residual_72, residual_74, residual_76, residual_78, residual_80, residual_82, residual_84, residual_86, residual_88, residual_90, residual_92, residual_94, residual_96, residual_98, residual_100, residual_102, residual_104, residual_106, residual_108, residual_110, residual_112, residual_114, residual_116, residual_118, residual_120, residual_122, residual_124, residual_126], Original ATen: [aten.div, aten.sub] triton_poi_fused_div_sub_1.run(primals_1, buf0, primals_3, buf1, buf7, buf9, buf11, buf13, buf16, buf18, buf20, buf22, buf25, buf27, buf29, buf31, buf34, buf36, buf38, buf40, buf43, buf45, buf47, buf49, buf52, buf54, buf56, buf58, buf61, buf63, buf65, buf67, buf70, buf72, buf74, buf76, buf79, buf81, buf83, buf85, buf88, buf90, buf92, buf94, buf97, buf99, buf101, buf103, buf106, buf108, buf110, buf112, buf115, buf117, buf119, buf121, buf124, buf126, buf128, buf130, buf133, buf135, buf137, buf139, buf142, buf144, buf146, 2097152, grid=grid(2097152), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = empty_strided_cuda((4, 4096), (4096, 1), torch.int64) buf4 = reinterpret_tensor(buf0, (4, 1, 4096), (4096, 4096, 1), 0); del buf0 # reuse buf5 = empty_strided_cuda((4, 1, 4096), (4096, 4096, 1), torch.float32) # Topologically Sorted Source Nodes: [hard_assign, soft_assign_1], Original ATen: [aten.argmax, aten._softmax] triton_per_fused__softmax_argmax_2.run(buf2, buf3, buf4, buf5, 16384, 64, grid=grid(16384), stream=stream0) buf6 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf8 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf10 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf12 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf14 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf17 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf19 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf21 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf23 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf26 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf28 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf30 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf32 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf35 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf37 = empty_strided_cuda((4, 1, 128), (128, 512, 1), 
torch.float32) buf39 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf41 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf44 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf46 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf48 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf50 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf53 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf55 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf57 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf59 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf62 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf64 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf66 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf68 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) # Topologically Sorted Source Nodes: [residual, residual_1, sum_1, residual_3, sum_2, residual_5, sum_3, residual_7, sum_4, residual_9, sum_5, residual_11, sum_6, residual_13, sum_7, residual_15, sum_8, residual_17, sum_9, residual_19, sum_10, residual_21, sum_11, residual_23, sum_12, residual_25, sum_13, residual_27, sum_14, residual_29, sum_15, residual_31, sum_16, residual_33, sum_17, residual_35, sum_18, residual_37, sum_19, residual_39, sum_20, residual_41, sum_21, residual_43, sum_22, residual_45, sum_23, residual_47, sum_24, residual_49, sum_25, residual_51, sum_26, residual_53, sum_27, residual_55, sum_28, residual_57, sum_29], Original ATen: [aten.sub, aten.mul, aten.sum] triton_red_fused_mul_sub_sum_3.run(buf1, primals_3, buf2, buf4, buf5, buf7, buf9, buf11, buf13, buf16, buf18, buf20, buf22, buf25, buf27, buf29, buf31, buf34, buf36, buf38, buf40, buf43, buf45, buf47, buf49, buf52, buf54, buf56, buf58, buf61, buf63, buf65, buf67, buf6, buf8, buf10, buf12, buf14, buf17, buf19, buf21, buf23, buf26, buf28, buf30, buf32, buf35, buf37, buf39, buf41, buf44, buf46, buf48, buf50, buf53, buf55, buf57, buf59, buf62, buf64, buf66, buf68, 512, 4096, grid=grid(512), stream=stream0) buf71 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf73 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf75 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf77 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf80 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf82 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf84 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf86 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf89 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf91 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf93 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf95 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf98 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf100 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf102 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf104 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf107 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf109 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf111 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf113 = 
empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf116 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf118 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf120 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf122 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf125 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf127 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf129 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf131 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) # Topologically Sorted Source Nodes: [residual_59, sum_30, residual_61, sum_31, residual_63, sum_32, residual_65, sum_33, residual_67, sum_34, residual_69, sum_35, residual_71, sum_36, residual_73, sum_37, residual_75, sum_38, residual_77, sum_39, residual_79, sum_40, residual_81, sum_41, residual_83, sum_42, residual_85, sum_43, residual_87, sum_44, residual_89, sum_45, residual_91, sum_46, residual_93, sum_47, residual_95, sum_48, residual_97, sum_49, residual_99, sum_50, residual_101, sum_51, residual_103, sum_52, residual_105, sum_53, residual_107, sum_54, residual_109, sum_55, residual_111, sum_56, residual_113, sum_57], Original ATen: [aten.mul, aten.sum] triton_red_fused_mul_sum_4.run(buf70, buf2, buf4, buf5, buf72, buf74, buf76, buf79, buf81, buf83, buf85, buf88, buf90, buf92, buf94, buf97, buf99, buf101, buf103, buf106, buf108, buf110, buf112, buf115, buf117, buf119, buf121, buf124, buf126, buf128, buf130, buf71, buf73, buf75, buf77, buf80, buf82, buf84, buf86, buf89, buf91, buf93, buf95, buf98, buf100, buf102, buf104, buf107, buf109, buf111, buf113, buf116, buf118, buf120, buf122, buf125, buf127, buf129, buf131, 512, 4096, grid=grid(512), stream=stream0) buf134 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf136 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf138 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf140 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf143 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf145 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf147 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) # Topologically Sorted Source Nodes: [residual_115, sum_58, residual_117, sum_59, residual_119, sum_60, residual_121, sum_61, residual_123, sum_62, residual_125, sum_63, residual_127, sum_64], Original ATen: [aten.mul, aten.sum] triton_red_fused_mul_sum_5.run(buf133, buf2, buf4, buf5, buf135, buf137, buf139, buf142, buf144, buf146, buf134, buf136, buf138, buf140, buf143, buf145, buf147, 512, 4096, grid=grid(512), stream=stream0) buf15 = empty_strided_cuda((4, 64, 128), (8192, 128, 1), torch.float32) buf24 = buf15; del buf15 # reuse buf33 = buf24; del buf24 # reuse buf42 = buf33; del buf33 # reuse buf51 = buf42; del buf42 # reuse buf60 = buf51; del buf51 # reuse buf69 = buf60; del buf60 # reuse buf78 = buf69; del buf69 # reuse buf87 = buf78; del buf78 # reuse buf96 = buf87; del buf87 # reuse buf105 = buf96; del buf96 # reuse buf114 = buf105; del buf105 # reuse buf123 = buf114; del buf114 # reuse buf132 = buf123; del buf123 # reuse buf141 = buf132; del buf132 # reuse buf148 = buf141; del buf141 # reuse buf149 = empty_strided_cuda((4, 64, 1), (64, 1, 256), torch.float32) buf150 = reinterpret_tensor(buf149, (4, 64, 1), (64, 1, 1), 0); del buf149 # reuse # Topologically Sorted Source Nodes: [vlad, setitem, setitem_1, setitem_2, 
setitem_3, setitem_4, setitem_5, setitem_6, setitem_7, setitem_8, setitem_9, setitem_10, setitem_11, setitem_12, setitem_13, setitem_14, setitem_15, setitem_16, setitem_17, setitem_18, setitem_19, setitem_20, setitem_21, setitem_22, setitem_23, setitem_24, setitem_25, setitem_26, setitem_27, setitem_28, setitem_29, setitem_30, setitem_31, setitem_32, setitem_33, setitem_34, setitem_35, setitem_36, setitem_37, setitem_38, setitem_39, setitem_40, setitem_41, setitem_42, setitem_43, setitem_44, setitem_45, setitem_46, setitem_47, setitem_48, setitem_49, setitem_50, setitem_51, setitem_52, setitem_53, setitem_54, setitem_55, setitem_56, setitem_57, setitem_58, setitem_59, setitem_60, setitem_61, setitem_62, setitem_63, vlad_1], Original ATen: [aten.zeros, aten.copy, aten.linalg_vector_norm] triton_per_fused_copy_linalg_vector_norm_zeros_6.run(buf148, buf150, buf14, buf12, buf10, buf8, buf6, buf23, buf21, buf19, buf17, buf32, buf30, buf28, buf26, buf41, buf39, buf37, buf35, buf50, buf48, buf46, buf44, buf59, buf57, buf55, buf53, buf68, buf66, buf64, buf62, buf77, buf75, buf73, buf71, buf86, buf84, buf82, buf80, buf95, buf93, buf91, buf89, buf104, buf102, buf100, buf98, buf113, buf111, buf109, buf107, buf122, buf120, buf118, buf116, buf131, buf129, buf127, buf125, buf140, buf138, buf136, buf134, buf147, buf145, buf143, 256, 128, grid=grid(256), stream=stream0) del buf10 del buf100 del buf102 del buf104 del buf107 del buf109 del buf111 del buf113 del buf116 del buf118 del buf12 del buf120 del buf122 del buf125 del buf127 del buf129 del buf131 del buf134 del buf136 del buf138 del buf14 del buf140 del buf143 del buf145 del buf147 del buf17 del buf19 del buf21 del buf23 del buf26 del buf28 del buf30 del buf32 del buf35 del buf37 del buf39 del buf41 del buf44 del buf46 del buf48 del buf50 del buf53 del buf55 del buf57 del buf59 del buf6 del buf62 del buf64 del buf66 del buf68 del buf71 del buf73 del buf75 del buf77 del buf8 del buf80 del buf82 del buf84 del buf86 del buf89 del buf91 del buf93 del buf95 del buf98 buf151 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf152 = reinterpret_tensor(buf151, (4, 1), (1, 1), 0); del buf151 # reuse buf153 = empty_strided_cuda((4, 8192), (8192, 1), torch.float32) # Topologically Sorted Source Nodes: [vlad_3], Original ATen: [aten.linalg_vector_norm, aten.div] triton_red_fused_div_linalg_vector_norm_7.run(buf152, buf148, buf150, buf153, 4, 8192, grid=grid(4), stream=stream0) return (buf153, buf3, primals_2, buf1, buf2, buf4, buf5, reinterpret_tensor(primals_3, (1, 128), (128, 1), 0), buf7, buf9, buf11, buf13, buf16, buf18, buf20, buf22, buf25, buf27, buf29, buf31, buf34, buf36, buf38, buf40, buf43, buf45, buf47, buf49, buf52, buf54, buf56, buf58, buf61, buf63, buf65, buf67, buf70, buf72, buf74, buf76, buf79, buf81, buf83, buf85, buf88, buf90, buf92, buf94, buf97, buf99, buf101, buf103, buf106, buf108, buf110, buf112, buf115, buf117, buf119, buf121, buf124, buf126, buf128, buf130, buf133, buf135, buf137, buf139, buf142, buf144, buf146, buf148, buf150, buf152, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 128, 64, 64), (524288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((64, 128), (128, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return 
print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
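# Consistency-check sketch (an illustrative addition, not emitted by the
# compiler): the compiled graph takes primals_1 (the input feature map),
# primals_2 (the 1x1 soft-assignment conv weight) and primals_3 (the
# centroids); its first two outputs are the (4, 8192) VLAD descriptor and the
# (4, 4096) hard-assignment map. Assuming a CUDA device and that the eager
# NetVLAD module reproduced below is in scope, the two paths should agree up
# to floating-point tolerance (barring argmax ties; atol may need adjusting).
def check_against_eager():
    model = NetVLAD().cuda()  # NetVLAD as defined in the module below
    x = torch.rand(4, 128, 64, 64, device='cuda')
    out = call([x.clone(), model.conv.weight.detach().clone(),
                model.centroids.detach().clone()])
    compiled_vlad, compiled_hard = out[0], out[1]
    eager_vlad, eager_hard = model(x)
    assert torch.allclose(compiled_vlad, eager_vlad, atol=1e-5)
    assert torch.equal(compiled_hard, eager_hard)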
import torch
import numpy as np
from sklearn.neighbors import NearestNeighbors
import torch.nn as nn
import torch.nn.functional as F


class NetVLAD(nn.Module):
    """NetVLAD layer implementation"""

    def __init__(self, num_clusters=64, dim=128, normalize_input=True,
                 vladv2=False):
        """
        Args:
            num_clusters : int
                The number of clusters.
            dim : int
                Dimension of descriptors.
            normalize_input : bool
                If true, descriptor-wise L2 normalization is applied to the input.
            vladv2 : bool
                If true, use vladv2, otherwise use vladv1.

        Note: the assignment sharpness alpha is not a constructor argument;
        it starts at 0 and is computed by init_params from training
        descriptors. A larger alpha gives a harder assignment.
        """
        super(NetVLAD, self).__init__()
        self.num_clusters = num_clusters
        self.dim = dim
        self.alpha = 0
        self.vladv2 = vladv2
        self.normalize_input = normalize_input
        # vladv1 folds the bias into the weights, so the conv is bias-free.
        self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1),
                              bias=vladv2)
        self.centroids = nn.Parameter(torch.rand(num_clusters, dim))

    def init_params(self, clsts, traindescs):
        if self.vladv2 is False:
            clstsAssign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True)
            dots = np.dot(clstsAssign, traindescs.T)
            dots.sort(0)
            dots = dots[::-1, :]  # sort, descending
            self.alpha = (-np.log(0.01) /
                          np.mean(dots[0, :] - dots[1, :])).item()
            self.centroids = nn.Parameter(torch.from_numpy(clsts))
            self.conv.weight = nn.Parameter(torch.from_numpy(
                self.alpha * clstsAssign).unsqueeze(2).unsqueeze(3))
            self.conv.bias = None
        else:
            knn = NearestNeighbors(n_jobs=-1)
            knn.fit(traindescs)
            del traindescs
            # kneighbors returns (distances, indices); [1] selects the index
            # array here, as in the original implementation.
            dsSq = np.square(knn.kneighbors(clsts, 2)[1])
            del knn
            self.alpha = (-np.log(0.01) /
                          np.mean(dsSq[:, 1] - dsSq[:, 0])).item()
            self.centroids = nn.Parameter(torch.from_numpy(clsts))
            del clsts, dsSq
            self.conv.weight = nn.Parameter((2.0 * self.alpha *
                self.centroids).unsqueeze(-1).unsqueeze(-1))
            self.conv.bias = nn.Parameter(-self.alpha *
                self.centroids.norm(dim=1))

    def forward(self, x):
        N, C = x.shape[:2]
        if self.normalize_input:
            x = F.normalize(x, p=2, dim=1)  # descriptor-wise L2 normalization
        soft_assign = self.conv(x).view(N, self.num_clusters, -1)
        hard_assign = torch.argmax(soft_assign, dim=1)
        soft_assign = F.softmax(soft_assign, dim=1)
        x_flatten = x.view(N, C, -1)
        vlad = torch.zeros([N, self.num_clusters, C], dtype=x.dtype,
                           layout=x.layout, device=x.device)
        # Accumulate soft-assignment-weighted residuals one cluster at a time.
        # The loop variable is named k (rather than reusing C, as the original
        # did) so it does not shadow the channel count above.
        for k in range(self.num_clusters):
            residual = x_flatten.unsqueeze(0).permute(1, 0, 2, 3) - \
                self.centroids[k:k + 1, :].expand(x_flatten.size(-1), -1, -1
                ).permute(1, 2, 0).unsqueeze(0)
            residual *= soft_assign[:, k:k + 1, :].unsqueeze(2)
            vlad[:, k:k + 1, :] = residual.sum(dim=-1)
        vlad = F.normalize(vlad, p=2, dim=2)  # intra-normalization per cluster
        vlad = vlad.view(x.size(0), -1)       # flatten to (N, num_clusters * C)
        vlad = F.normalize(vlad, p=2, dim=1)  # final L2 normalization
        return vlad, hard_assign


def get_inputs():
    return [torch.rand([4, 128, 64, 64])]


def get_init_inputs():
    return [[], {}]
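# Shape sanity sketch (illustrative): with the default num_clusters=64 and
# dim=128, the (4, 128, 64, 64) input from get_inputs() yields a (4, 8192)
# L2-normalized VLAD descriptor and a (4, 4096) hard-assignment index map.
if __name__ == "__main__":
    model = NetVLAD()
    vlad, hard_assign = model(get_inputs()[0])
    print(vlad.shape)         # torch.Size([4, 8192])
    print(hard_assign.shape)  # torch.Size([4, 4096])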
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np from sklearn.neighbors import NearestNeighbors import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_red_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 4096 x1 = xindex // 4096 _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x3 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 524288 * x1), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = _tmp3 + tmp2 _tmp3 = tl.where(rmask, tmp4, _tmp3) tmp3 = tl.sum(_tmp3, 1)[:, None] tl.store(out_ptr0 + x3, tmp3, None) @triton.jit def triton_poi_fused_div_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, out_ptr28, out_ptr29, out_ptr30, out_ptr31, out_ptr32, out_ptr33, out_ptr34, out_ptr35, out_ptr36, out_ptr37, out_ptr38, out_ptr39, out_ptr40, out_ptr41, out_ptr42, out_ptr43, out_ptr44, out_ptr45, out_ptr46, out_ptr47, out_ptr48, out_ptr49, out_ptr50, out_ptr51, out_ptr52, out_ptr53, out_ptr54, out_ptr55, out_ptr56, out_ptr57, out_ptr58, out_ptr59, out_ptr60, out_ptr61, out_ptr62, out_ptr63, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = xindex // 524288 x1 = xindex // 4096 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x0 + 4096 * x2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr2 + (128 + x1), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (256 + x1), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (384 + x1), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (512 + x1), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + (640 + x1), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + (768 + x1), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr2 + (896 + x1), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (1024 + x1), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (1152 + x1), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (1280 + x1), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (1408 + x1), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + (1536 + x1), None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (1664 + x1), None, eviction_policy='evict_last') tmp32 = tl.load(in_ptr2 + 
(1792 + x1), None, eviction_policy='evict_last') tmp34 = tl.load(in_ptr2 + (1920 + x1), None, eviction_policy='evict_last') tmp36 = tl.load(in_ptr2 + (2048 + x1), None, eviction_policy='evict_last') tmp38 = tl.load(in_ptr2 + (2176 + x1), None, eviction_policy='evict_last') tmp40 = tl.load(in_ptr2 + (2304 + x1), None, eviction_policy='evict_last') tmp42 = tl.load(in_ptr2 + (2432 + x1), None, eviction_policy='evict_last') tmp44 = tl.load(in_ptr2 + (2560 + x1), None, eviction_policy='evict_last') tmp46 = tl.load(in_ptr2 + (2688 + x1), None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr2 + (2816 + x1), None, eviction_policy='evict_last') tmp50 = tl.load(in_ptr2 + (2944 + x1), None, eviction_policy='evict_last') tmp52 = tl.load(in_ptr2 + (3072 + x1), None, eviction_policy='evict_last') tmp54 = tl.load(in_ptr2 + (3200 + x1), None, eviction_policy='evict_last') tmp56 = tl.load(in_ptr2 + (3328 + x1), None, eviction_policy='evict_last') tmp58 = tl.load(in_ptr2 + (3456 + x1), None, eviction_policy='evict_last') tmp60 = tl.load(in_ptr2 + (3584 + x1), None, eviction_policy='evict_last') tmp62 = tl.load(in_ptr2 + (3712 + x1), None, eviction_policy='evict_last') tmp64 = tl.load(in_ptr2 + (3840 + x1), None, eviction_policy='evict_last') tmp66 = tl.load(in_ptr2 + (3968 + x1), None, eviction_policy='evict_last') tmp68 = tl.load(in_ptr2 + (4096 + x1), None, eviction_policy='evict_last') tmp70 = tl.load(in_ptr2 + (4224 + x1), None, eviction_policy='evict_last') tmp72 = tl.load(in_ptr2 + (4352 + x1), None, eviction_policy='evict_last') tmp74 = tl.load(in_ptr2 + (4480 + x1), None, eviction_policy='evict_last') tmp76 = tl.load(in_ptr2 + (4608 + x1), None, eviction_policy='evict_last') tmp78 = tl.load(in_ptr2 + (4736 + x1), None, eviction_policy='evict_last') tmp80 = tl.load(in_ptr2 + (4864 + x1), None, eviction_policy='evict_last') tmp82 = tl.load(in_ptr2 + (4992 + x1), None, eviction_policy='evict_last') tmp84 = tl.load(in_ptr2 + (5120 + x1), None, eviction_policy='evict_last') tmp86 = tl.load(in_ptr2 + (5248 + x1), None, eviction_policy='evict_last') tmp88 = tl.load(in_ptr2 + (5376 + x1), None, eviction_policy='evict_last') tmp90 = tl.load(in_ptr2 + (5504 + x1), None, eviction_policy='evict_last') tmp92 = tl.load(in_ptr2 + (5632 + x1), None, eviction_policy='evict_last') tmp94 = tl.load(in_ptr2 + (5760 + x1), None, eviction_policy='evict_last') tmp96 = tl.load(in_ptr2 + (5888 + x1), None, eviction_policy='evict_last') tmp98 = tl.load(in_ptr2 + (6016 + x1), None, eviction_policy='evict_last') tmp100 = tl.load(in_ptr2 + (6144 + x1), None, eviction_policy='evict_last') tmp102 = tl.load(in_ptr2 + (6272 + x1), None, eviction_policy='evict_last') tmp104 = tl.load(in_ptr2 + (6400 + x1), None, eviction_policy='evict_last') tmp106 = tl.load(in_ptr2 + (6528 + x1), None, eviction_policy='evict_last') tmp108 = tl.load(in_ptr2 + (6656 + x1), None, eviction_policy='evict_last') tmp110 = tl.load(in_ptr2 + (6784 + x1), None, eviction_policy='evict_last') tmp112 = tl.load(in_ptr2 + (6912 + x1), None, eviction_policy='evict_last') tmp114 = tl.load(in_ptr2 + (7040 + x1), None, eviction_policy='evict_last') tmp116 = tl.load(in_ptr2 + (7168 + x1), None, eviction_policy='evict_last') tmp118 = tl.load(in_ptr2 + (7296 + x1), None, eviction_policy='evict_last') tmp120 = tl.load(in_ptr2 + (7424 + x1), None, eviction_policy='evict_last') tmp122 = tl.load(in_ptr2 + (7552 + x1), None, eviction_policy='evict_last') tmp124 = tl.load(in_ptr2 + (7680 + x1), None, eviction_policy='evict_last') tmp126 = tl.load(in_ptr2 + (7808 + x1), 
None, eviction_policy='evict_last') tmp128 = tl.load(in_ptr2 + (7936 + x1), None, eviction_policy='evict_last') tmp130 = tl.load(in_ptr2 + (8064 + x1), None, eviction_policy='evict_last') tmp2 = libdevice.sqrt(tmp1) tmp3 = 1e-12 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp0 / tmp4 tmp7 = tmp5 - tmp6 tmp9 = tmp5 - tmp8 tmp11 = tmp5 - tmp10 tmp13 = tmp5 - tmp12 tmp15 = tmp5 - tmp14 tmp17 = tmp5 - tmp16 tmp19 = tmp5 - tmp18 tmp21 = tmp5 - tmp20 tmp23 = tmp5 - tmp22 tmp25 = tmp5 - tmp24 tmp27 = tmp5 - tmp26 tmp29 = tmp5 - tmp28 tmp31 = tmp5 - tmp30 tmp33 = tmp5 - tmp32 tmp35 = tmp5 - tmp34 tmp37 = tmp5 - tmp36 tmp39 = tmp5 - tmp38 tmp41 = tmp5 - tmp40 tmp43 = tmp5 - tmp42 tmp45 = tmp5 - tmp44 tmp47 = tmp5 - tmp46 tmp49 = tmp5 - tmp48 tmp51 = tmp5 - tmp50 tmp53 = tmp5 - tmp52 tmp55 = tmp5 - tmp54 tmp57 = tmp5 - tmp56 tmp59 = tmp5 - tmp58 tmp61 = tmp5 - tmp60 tmp63 = tmp5 - tmp62 tmp65 = tmp5 - tmp64 tmp67 = tmp5 - tmp66 tmp69 = tmp5 - tmp68 tmp71 = tmp5 - tmp70 tmp73 = tmp5 - tmp72 tmp75 = tmp5 - tmp74 tmp77 = tmp5 - tmp76 tmp79 = tmp5 - tmp78 tmp81 = tmp5 - tmp80 tmp83 = tmp5 - tmp82 tmp85 = tmp5 - tmp84 tmp87 = tmp5 - tmp86 tmp89 = tmp5 - tmp88 tmp91 = tmp5 - tmp90 tmp93 = tmp5 - tmp92 tmp95 = tmp5 - tmp94 tmp97 = tmp5 - tmp96 tmp99 = tmp5 - tmp98 tmp101 = tmp5 - tmp100 tmp103 = tmp5 - tmp102 tmp105 = tmp5 - tmp104 tmp107 = tmp5 - tmp106 tmp109 = tmp5 - tmp108 tmp111 = tmp5 - tmp110 tmp113 = tmp5 - tmp112 tmp115 = tmp5 - tmp114 tmp117 = tmp5 - tmp116 tmp119 = tmp5 - tmp118 tmp121 = tmp5 - tmp120 tmp123 = tmp5 - tmp122 tmp125 = tmp5 - tmp124 tmp127 = tmp5 - tmp126 tmp129 = tmp5 - tmp128 tmp131 = tmp5 - tmp130 tl.store(out_ptr0 + x3, tmp5, None) tl.store(out_ptr1 + x3, tmp7, None) tl.store(out_ptr2 + x3, tmp9, None) tl.store(out_ptr3 + x3, tmp11, None) tl.store(out_ptr4 + x3, tmp13, None) tl.store(out_ptr5 + x3, tmp15, None) tl.store(out_ptr6 + x3, tmp17, None) tl.store(out_ptr7 + x3, tmp19, None) tl.store(out_ptr8 + x3, tmp21, None) tl.store(out_ptr9 + x3, tmp23, None) tl.store(out_ptr10 + x3, tmp25, None) tl.store(out_ptr11 + x3, tmp27, None) tl.store(out_ptr12 + x3, tmp29, None) tl.store(out_ptr13 + x3, tmp31, None) tl.store(out_ptr14 + x3, tmp33, None) tl.store(out_ptr15 + x3, tmp35, None) tl.store(out_ptr16 + x3, tmp37, None) tl.store(out_ptr17 + x3, tmp39, None) tl.store(out_ptr18 + x3, tmp41, None) tl.store(out_ptr19 + x3, tmp43, None) tl.store(out_ptr20 + x3, tmp45, None) tl.store(out_ptr21 + x3, tmp47, None) tl.store(out_ptr22 + x3, tmp49, None) tl.store(out_ptr23 + x3, tmp51, None) tl.store(out_ptr24 + x3, tmp53, None) tl.store(out_ptr25 + x3, tmp55, None) tl.store(out_ptr26 + x3, tmp57, None) tl.store(out_ptr27 + x3, tmp59, None) tl.store(out_ptr28 + x3, tmp61, None) tl.store(out_ptr29 + x3, tmp63, None) tl.store(out_ptr30 + x3, tmp65, None) tl.store(out_ptr31 + x3, tmp67, None) tl.store(out_ptr32 + x3, tmp69, None) tl.store(out_ptr33 + x3, tmp71, None) tl.store(out_ptr34 + x3, tmp73, None) tl.store(out_ptr35 + x3, tmp75, None) tl.store(out_ptr36 + x3, tmp77, None) tl.store(out_ptr37 + x3, tmp79, None) tl.store(out_ptr38 + x3, tmp81, None) tl.store(out_ptr39 + x3, tmp83, None) tl.store(out_ptr40 + x3, tmp85, None) tl.store(out_ptr41 + x3, tmp87, None) tl.store(out_ptr42 + x3, tmp89, None) tl.store(out_ptr43 + x3, tmp91, None) tl.store(out_ptr44 + x3, tmp93, None) tl.store(out_ptr45 + x3, tmp95, None) tl.store(out_ptr46 + x3, tmp97, None) tl.store(out_ptr47 + x3, tmp99, None) tl.store(out_ptr48 + x3, tmp101, None) tl.store(out_ptr49 + x3, tmp103, None) tl.store(out_ptr50 + x3, 
tmp105, None) tl.store(out_ptr51 + x3, tmp107, None) tl.store(out_ptr52 + x3, tmp109, None) tl.store(out_ptr53 + x3, tmp111, None) tl.store(out_ptr54 + x3, tmp113, None) tl.store(out_ptr55 + x3, tmp115, None) tl.store(out_ptr56 + x3, tmp117, None) tl.store(out_ptr57 + x3, tmp119, None) tl.store(out_ptr58 + x3, tmp121, None) tl.store(out_ptr59 + x3, tmp123, None) tl.store(out_ptr60 + x3, tmp125, None) tl.store(out_ptr61 + x3, tmp127, None) tl.store(out_ptr62 + x3, tmp129, None) tl.store(out_ptr63 + x3, tmp131, None) @triton.jit def triton_per_fused__softmax_argmax_2(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4096 x1 = xindex // 4096 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 262144 * x1), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.broadcast_to(rindex, tmp1.shape) _, tmp2_tmp = triton_helpers.max_with_index(tmp1, tmp3, 1) tmp2 = tmp2_tmp[:, None] tmp5 = triton_helpers.max2(tmp1, 1)[:, None] tmp6 = tmp0 - tmp5 tmp7 = tl_math.exp(tmp6) tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.sum(tmp8, 1)[:, None] tl.store(out_ptr0 + x3, tmp2, None) tl.store(out_ptr1 + x3, tmp5, None) tl.store(out_ptr2 + x3, tmp10, None) @triton.jit def triton_red_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, in_ptr32, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, out_ptr28, xnumel, rnumel, XBLOCK: tl. 
constexpr, RBLOCK: tl.constexpr): xnumel = 512 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x0 = xindex % 128 tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') x1 = xindex // 128 _tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp20 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp29 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp38 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp47 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp56 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp65 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp74 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp83 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp92 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp101 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp110 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp119 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp128 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp137 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp146 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp155 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp164 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp173 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp182 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp191 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp200 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp209 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp218 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp227 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp236 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp245 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp254 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp263 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp3 = tl.load(in_ptr2 + (r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr3 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tl.load(in_ptr4 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl.load(in_ptr5 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp14 = tl.load(in_ptr2 + (4096 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tl.load(in_ptr6 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp23 = tl.load(in_ptr2 + (8192 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp31 = tl.load(in_ptr7 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp32 = tl.load(in_ptr2 + (12288 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp40 = tl.load(in_ptr8 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp41 = tl.load(in_ptr2 + (16384 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp49 = tl.load(in_ptr9 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp50 = tl.load(in_ptr2 + (20480 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp58 = tl.load(in_ptr10 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp59 = tl.load(in_ptr2 + (24576 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', 
other=0.0) tmp67 = tl.load(in_ptr11 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp68 = tl.load(in_ptr2 + (28672 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp76 = tl.load(in_ptr12 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp77 = tl.load(in_ptr2 + (32768 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp85 = tl.load(in_ptr13 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp86 = tl.load(in_ptr2 + (36864 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp94 = tl.load(in_ptr14 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp95 = tl.load(in_ptr2 + (40960 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp103 = tl.load(in_ptr15 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp104 = tl.load(in_ptr2 + (45056 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp112 = tl.load(in_ptr16 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp113 = tl.load(in_ptr2 + (49152 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp121 = tl.load(in_ptr17 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp122 = tl.load(in_ptr2 + (53248 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp130 = tl.load(in_ptr18 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp131 = tl.load(in_ptr2 + (57344 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp139 = tl.load(in_ptr19 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp140 = tl.load(in_ptr2 + (61440 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp148 = tl.load(in_ptr20 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp149 = tl.load(in_ptr2 + (65536 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp157 = tl.load(in_ptr21 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp158 = tl.load(in_ptr2 + (69632 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp166 = tl.load(in_ptr22 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp167 = tl.load(in_ptr2 + (73728 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp175 = tl.load(in_ptr23 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp176 = tl.load(in_ptr2 + (77824 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp184 = tl.load(in_ptr24 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp185 = tl.load(in_ptr2 + (81920 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp193 = tl.load(in_ptr25 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp194 = tl.load(in_ptr2 + (86016 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp202 = tl.load(in_ptr26 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp203 = tl.load(in_ptr2 + (90112 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp211 = tl.load(in_ptr27 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp212 = tl.load(in_ptr2 
+ (94208 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp220 = tl.load(in_ptr28 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp221 = tl.load(in_ptr2 + (98304 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp229 = tl.load(in_ptr29 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp230 = tl.load(in_ptr2 + (102400 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp238 = tl.load(in_ptr30 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp239 = tl.load(in_ptr2 + (106496 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp247 = tl.load(in_ptr31 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp248 = tl.load(in_ptr2 + (110592 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp256 = tl.load(in_ptr32 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp257 = tl.load(in_ptr2 + (114688 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp2 * tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = _tmp11 + tmp10 _tmp11 = tl.where(rmask & xmask, tmp12, _tmp11) tmp15 = tmp14 - tmp4 tmp16 = tl_math.exp(tmp15) tmp17 = tmp16 / tmp7 tmp18 = tmp13 * tmp17 tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp21 = _tmp20 + tmp19 _tmp20 = tl.where(rmask & xmask, tmp21, _tmp20) tmp24 = tmp23 - tmp4 tmp25 = tl_math.exp(tmp24) tmp26 = tmp25 / tmp7 tmp27 = tmp22 * tmp26 tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = _tmp29 + tmp28 _tmp29 = tl.where(rmask & xmask, tmp30, _tmp29) tmp33 = tmp32 - tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp34 / tmp7 tmp36 = tmp31 * tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = _tmp38 + tmp37 _tmp38 = tl.where(rmask & xmask, tmp39, _tmp38) tmp42 = tmp41 - tmp4 tmp43 = tl_math.exp(tmp42) tmp44 = tmp43 / tmp7 tmp45 = tmp40 * tmp44 tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK]) tmp48 = _tmp47 + tmp46 _tmp47 = tl.where(rmask & xmask, tmp48, _tmp47) tmp51 = tmp50 - tmp4 tmp52 = tl_math.exp(tmp51) tmp53 = tmp52 / tmp7 tmp54 = tmp49 * tmp53 tmp55 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK]) tmp57 = _tmp56 + tmp55 _tmp56 = tl.where(rmask & xmask, tmp57, _tmp56) tmp60 = tmp59 - tmp4 tmp61 = tl_math.exp(tmp60) tmp62 = tmp61 / tmp7 tmp63 = tmp58 * tmp62 tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK]) tmp66 = _tmp65 + tmp64 _tmp65 = tl.where(rmask & xmask, tmp66, _tmp65) tmp69 = tmp68 - tmp4 tmp70 = tl_math.exp(tmp69) tmp71 = tmp70 / tmp7 tmp72 = tmp67 * tmp71 tmp73 = tl.broadcast_to(tmp72, [XBLOCK, RBLOCK]) tmp75 = _tmp74 + tmp73 _tmp74 = tl.where(rmask & xmask, tmp75, _tmp74) tmp78 = tmp77 - tmp4 tmp79 = tl_math.exp(tmp78) tmp80 = tmp79 / tmp7 tmp81 = tmp76 * tmp80 tmp82 = tl.broadcast_to(tmp81, [XBLOCK, RBLOCK]) tmp84 = _tmp83 + tmp82 _tmp83 = tl.where(rmask & xmask, tmp84, _tmp83) tmp87 = tmp86 - tmp4 tmp88 = tl_math.exp(tmp87) tmp89 = tmp88 / tmp7 tmp90 = tmp85 * tmp89 tmp91 = tl.broadcast_to(tmp90, [XBLOCK, RBLOCK]) tmp93 = _tmp92 + tmp91 _tmp92 = tl.where(rmask & xmask, tmp93, _tmp92) tmp96 = tmp95 - tmp4 tmp97 = tl_math.exp(tmp96) tmp98 = tmp97 / tmp7 tmp99 = tmp94 * tmp98 tmp100 = tl.broadcast_to(tmp99, [XBLOCK, RBLOCK]) tmp102 = _tmp101 + tmp100 _tmp101 = tl.where(rmask & xmask, tmp102, _tmp101) tmp105 = tmp104 - tmp4 tmp106 = tl_math.exp(tmp105) 
tmp107 = tmp106 / tmp7 tmp108 = tmp103 * tmp107 tmp109 = tl.broadcast_to(tmp108, [XBLOCK, RBLOCK]) tmp111 = _tmp110 + tmp109 _tmp110 = tl.where(rmask & xmask, tmp111, _tmp110) tmp114 = tmp113 - tmp4 tmp115 = tl_math.exp(tmp114) tmp116 = tmp115 / tmp7 tmp117 = tmp112 * tmp116 tmp118 = tl.broadcast_to(tmp117, [XBLOCK, RBLOCK]) tmp120 = _tmp119 + tmp118 _tmp119 = tl.where(rmask & xmask, tmp120, _tmp119) tmp123 = tmp122 - tmp4 tmp124 = tl_math.exp(tmp123) tmp125 = tmp124 / tmp7 tmp126 = tmp121 * tmp125 tmp127 = tl.broadcast_to(tmp126, [XBLOCK, RBLOCK]) tmp129 = _tmp128 + tmp127 _tmp128 = tl.where(rmask & xmask, tmp129, _tmp128) tmp132 = tmp131 - tmp4 tmp133 = tl_math.exp(tmp132) tmp134 = tmp133 / tmp7 tmp135 = tmp130 * tmp134 tmp136 = tl.broadcast_to(tmp135, [XBLOCK, RBLOCK]) tmp138 = _tmp137 + tmp136 _tmp137 = tl.where(rmask & xmask, tmp138, _tmp137) tmp141 = tmp140 - tmp4 tmp142 = tl_math.exp(tmp141) tmp143 = tmp142 / tmp7 tmp144 = tmp139 * tmp143 tmp145 = tl.broadcast_to(tmp144, [XBLOCK, RBLOCK]) tmp147 = _tmp146 + tmp145 _tmp146 = tl.where(rmask & xmask, tmp147, _tmp146) tmp150 = tmp149 - tmp4 tmp151 = tl_math.exp(tmp150) tmp152 = tmp151 / tmp7 tmp153 = tmp148 * tmp152 tmp154 = tl.broadcast_to(tmp153, [XBLOCK, RBLOCK]) tmp156 = _tmp155 + tmp154 _tmp155 = tl.where(rmask & xmask, tmp156, _tmp155) tmp159 = tmp158 - tmp4 tmp160 = tl_math.exp(tmp159) tmp161 = tmp160 / tmp7 tmp162 = tmp157 * tmp161 tmp163 = tl.broadcast_to(tmp162, [XBLOCK, RBLOCK]) tmp165 = _tmp164 + tmp163 _tmp164 = tl.where(rmask & xmask, tmp165, _tmp164) tmp168 = tmp167 - tmp4 tmp169 = tl_math.exp(tmp168) tmp170 = tmp169 / tmp7 tmp171 = tmp166 * tmp170 tmp172 = tl.broadcast_to(tmp171, [XBLOCK, RBLOCK]) tmp174 = _tmp173 + tmp172 _tmp173 = tl.where(rmask & xmask, tmp174, _tmp173) tmp177 = tmp176 - tmp4 tmp178 = tl_math.exp(tmp177) tmp179 = tmp178 / tmp7 tmp180 = tmp175 * tmp179 tmp181 = tl.broadcast_to(tmp180, [XBLOCK, RBLOCK]) tmp183 = _tmp182 + tmp181 _tmp182 = tl.where(rmask & xmask, tmp183, _tmp182) tmp186 = tmp185 - tmp4 tmp187 = tl_math.exp(tmp186) tmp188 = tmp187 / tmp7 tmp189 = tmp184 * tmp188 tmp190 = tl.broadcast_to(tmp189, [XBLOCK, RBLOCK]) tmp192 = _tmp191 + tmp190 _tmp191 = tl.where(rmask & xmask, tmp192, _tmp191) tmp195 = tmp194 - tmp4 tmp196 = tl_math.exp(tmp195) tmp197 = tmp196 / tmp7 tmp198 = tmp193 * tmp197 tmp199 = tl.broadcast_to(tmp198, [XBLOCK, RBLOCK]) tmp201 = _tmp200 + tmp199 _tmp200 = tl.where(rmask & xmask, tmp201, _tmp200) tmp204 = tmp203 - tmp4 tmp205 = tl_math.exp(tmp204) tmp206 = tmp205 / tmp7 tmp207 = tmp202 * tmp206 tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK]) tmp210 = _tmp209 + tmp208 _tmp209 = tl.where(rmask & xmask, tmp210, _tmp209) tmp213 = tmp212 - tmp4 tmp214 = tl_math.exp(tmp213) tmp215 = tmp214 / tmp7 tmp216 = tmp211 * tmp215 tmp217 = tl.broadcast_to(tmp216, [XBLOCK, RBLOCK]) tmp219 = _tmp218 + tmp217 _tmp218 = tl.where(rmask & xmask, tmp219, _tmp218) tmp222 = tmp221 - tmp4 tmp223 = tl_math.exp(tmp222) tmp224 = tmp223 / tmp7 tmp225 = tmp220 * tmp224 tmp226 = tl.broadcast_to(tmp225, [XBLOCK, RBLOCK]) tmp228 = _tmp227 + tmp226 _tmp227 = tl.where(rmask & xmask, tmp228, _tmp227) tmp231 = tmp230 - tmp4 tmp232 = tl_math.exp(tmp231) tmp233 = tmp232 / tmp7 tmp234 = tmp229 * tmp233 tmp235 = tl.broadcast_to(tmp234, [XBLOCK, RBLOCK]) tmp237 = _tmp236 + tmp235 _tmp236 = tl.where(rmask & xmask, tmp237, _tmp236) tmp240 = tmp239 - tmp4 tmp241 = tl_math.exp(tmp240) tmp242 = tmp241 / tmp7 tmp243 = tmp238 * tmp242 tmp244 = tl.broadcast_to(tmp243, [XBLOCK, RBLOCK]) tmp246 = _tmp245 + tmp244 _tmp245 = 
tl.where(rmask & xmask, tmp246, _tmp245) tmp249 = tmp248 - tmp4 tmp250 = tl_math.exp(tmp249) tmp251 = tmp250 / tmp7 tmp252 = tmp247 * tmp251 tmp253 = tl.broadcast_to(tmp252, [XBLOCK, RBLOCK]) tmp255 = _tmp254 + tmp253 _tmp254 = tl.where(rmask & xmask, tmp255, _tmp254) tmp258 = tmp257 - tmp4 tmp259 = tl_math.exp(tmp258) tmp260 = tmp259 / tmp7 tmp261 = tmp256 * tmp260 tmp262 = tl.broadcast_to(tmp261, [XBLOCK, RBLOCK]) tmp264 = _tmp263 + tmp262 _tmp263 = tl.where(rmask & xmask, tmp264, _tmp263) tmp11 = tl.sum(_tmp11, 1)[:, None] tl.store(out_ptr0 + x3, tmp11, xmask) tmp20 = tl.sum(_tmp20, 1)[:, None] tl.store(out_ptr1 + x3, tmp20, xmask) tmp29 = tl.sum(_tmp29, 1)[:, None] tl.store(out_ptr2 + x3, tmp29, xmask) tmp38 = tl.sum(_tmp38, 1)[:, None] tl.store(out_ptr3 + x3, tmp38, xmask) tmp47 = tl.sum(_tmp47, 1)[:, None] tl.store(out_ptr4 + x3, tmp47, xmask) tmp56 = tl.sum(_tmp56, 1)[:, None] tl.store(out_ptr5 + x3, tmp56, xmask) tmp65 = tl.sum(_tmp65, 1)[:, None] tl.store(out_ptr6 + x3, tmp65, xmask) tmp74 = tl.sum(_tmp74, 1)[:, None] tl.store(out_ptr7 + x3, tmp74, xmask) tmp83 = tl.sum(_tmp83, 1)[:, None] tl.store(out_ptr8 + x3, tmp83, xmask) tmp92 = tl.sum(_tmp92, 1)[:, None] tl.store(out_ptr9 + x3, tmp92, xmask) tmp101 = tl.sum(_tmp101, 1)[:, None] tl.store(out_ptr10 + x3, tmp101, xmask) tmp110 = tl.sum(_tmp110, 1)[:, None] tl.store(out_ptr11 + x3, tmp110, xmask) tmp119 = tl.sum(_tmp119, 1)[:, None] tl.store(out_ptr12 + x3, tmp119, xmask) tmp128 = tl.sum(_tmp128, 1)[:, None] tl.store(out_ptr13 + x3, tmp128, xmask) tmp137 = tl.sum(_tmp137, 1)[:, None] tl.store(out_ptr14 + x3, tmp137, xmask) tmp146 = tl.sum(_tmp146, 1)[:, None] tl.store(out_ptr15 + x3, tmp146, xmask) tmp155 = tl.sum(_tmp155, 1)[:, None] tl.store(out_ptr16 + x3, tmp155, xmask) tmp164 = tl.sum(_tmp164, 1)[:, None] tl.store(out_ptr17 + x3, tmp164, xmask) tmp173 = tl.sum(_tmp173, 1)[:, None] tl.store(out_ptr18 + x3, tmp173, xmask) tmp182 = tl.sum(_tmp182, 1)[:, None] tl.store(out_ptr19 + x3, tmp182, xmask) tmp191 = tl.sum(_tmp191, 1)[:, None] tl.store(out_ptr20 + x3, tmp191, xmask) tmp200 = tl.sum(_tmp200, 1)[:, None] tl.store(out_ptr21 + x3, tmp200, xmask) tmp209 = tl.sum(_tmp209, 1)[:, None] tl.store(out_ptr22 + x3, tmp209, xmask) tmp218 = tl.sum(_tmp218, 1)[:, None] tl.store(out_ptr23 + x3, tmp218, xmask) tmp227 = tl.sum(_tmp227, 1)[:, None] tl.store(out_ptr24 + x3, tmp227, xmask) tmp236 = tl.sum(_tmp236, 1)[:, None] tl.store(out_ptr25 + x3, tmp236, xmask) tmp245 = tl.sum(_tmp245, 1)[:, None] tl.store(out_ptr26 + x3, tmp245, xmask) tmp254 = tl.sum(_tmp254, 1)[:, None] tl.store(out_ptr27 + x3, tmp254, xmask) tmp263 = tl.sum(_tmp263, 1)[:, None] tl.store(out_ptr28 + x3, tmp263, xmask) @triton.jit def triton_red_fused_mul_sum_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 512 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, 
RBLOCK)[None, :] x3 = xindex x1 = xindex // 128 _tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp72 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp81 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp90 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp99 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp108 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp117 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp126 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp135 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp144 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp153 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp162 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp171 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp180 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp189 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp198 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp207 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp216 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp225 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp234 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp243 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp252 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr1 + (118784 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr2 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tl.load(in_ptr3 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr4 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp12 = tl.load(in_ptr1 + (122880 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr5 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp21 = tl.load(in_ptr1 + (126976 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tl.load(in_ptr6 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp30 = tl.load(in_ptr1 + (131072 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp38 = tl.load(in_ptr7 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp39 = tl.load(in_ptr1 + (135168 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tl.load(in_ptr8 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp48 = tl.load(in_ptr1 + (139264 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp56 = tl.load(in_ptr9 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp57 = tl.load(in_ptr1 + (143360 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp65 = tl.load(in_ptr10 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp66 = tl.load(in_ptr1 + (147456 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp74 = tl.load(in_ptr11 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', 
other=0.0) tmp75 = tl.load(in_ptr1 + (151552 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp83 = tl.load(in_ptr12 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp84 = tl.load(in_ptr1 + (155648 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp92 = tl.load(in_ptr13 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp93 = tl.load(in_ptr1 + (159744 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp101 = tl.load(in_ptr14 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp102 = tl.load(in_ptr1 + (163840 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp110 = tl.load(in_ptr15 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp111 = tl.load(in_ptr1 + (167936 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp119 = tl.load(in_ptr16 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp120 = tl.load(in_ptr1 + (172032 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp128 = tl.load(in_ptr17 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp129 = tl.load(in_ptr1 + (176128 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp137 = tl.load(in_ptr18 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp138 = tl.load(in_ptr1 + (180224 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp146 = tl.load(in_ptr19 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp147 = tl.load(in_ptr1 + (184320 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp155 = tl.load(in_ptr20 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp156 = tl.load(in_ptr1 + (188416 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp164 = tl.load(in_ptr21 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp165 = tl.load(in_ptr1 + (192512 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp173 = tl.load(in_ptr22 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp174 = tl.load(in_ptr1 + (196608 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp182 = tl.load(in_ptr23 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp183 = tl.load(in_ptr1 + (200704 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp191 = tl.load(in_ptr24 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp192 = tl.load(in_ptr1 + (204800 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp200 = tl.load(in_ptr25 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp201 = tl.load(in_ptr1 + (208896 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp209 = tl.load(in_ptr26 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp210 = tl.load(in_ptr1 + (212992 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp218 = tl.load(in_ptr27 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp219 = tl.load(in_ptr1 + (217088 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', 
other=0.0) tmp227 = tl.load(in_ptr28 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp228 = tl.load(in_ptr1 + (221184 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp236 = tl.load(in_ptr29 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp237 = tl.load(in_ptr1 + (225280 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp245 = tl.load(in_ptr30 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp246 = tl.load(in_ptr1 + (229376 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp3 = tmp1 - tmp2 tmp4 = tl_math.exp(tmp3) tmp6 = tmp4 / tmp5 tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = _tmp9 + tmp8 _tmp9 = tl.where(rmask & xmask, tmp10, _tmp9) tmp13 = tmp12 - tmp2 tmp14 = tl_math.exp(tmp13) tmp15 = tmp14 / tmp5 tmp16 = tmp11 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = _tmp18 + tmp17 _tmp18 = tl.where(rmask & xmask, tmp19, _tmp18) tmp22 = tmp21 - tmp2 tmp23 = tl_math.exp(tmp22) tmp24 = tmp23 / tmp5 tmp25 = tmp20 * tmp24 tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = _tmp27 + tmp26 _tmp27 = tl.where(rmask & xmask, tmp28, _tmp27) tmp31 = tmp30 - tmp2 tmp32 = tl_math.exp(tmp31) tmp33 = tmp32 / tmp5 tmp34 = tmp29 * tmp33 tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = _tmp36 + tmp35 _tmp36 = tl.where(rmask & xmask, tmp37, _tmp36) tmp40 = tmp39 - tmp2 tmp41 = tl_math.exp(tmp40) tmp42 = tmp41 / tmp5 tmp43 = tmp38 * tmp42 tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK]) tmp46 = _tmp45 + tmp44 _tmp45 = tl.where(rmask & xmask, tmp46, _tmp45) tmp49 = tmp48 - tmp2 tmp50 = tl_math.exp(tmp49) tmp51 = tmp50 / tmp5 tmp52 = tmp47 * tmp51 tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK]) tmp55 = _tmp54 + tmp53 _tmp54 = tl.where(rmask & xmask, tmp55, _tmp54) tmp58 = tmp57 - tmp2 tmp59 = tl_math.exp(tmp58) tmp60 = tmp59 / tmp5 tmp61 = tmp56 * tmp60 tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK]) tmp64 = _tmp63 + tmp62 _tmp63 = tl.where(rmask & xmask, tmp64, _tmp63) tmp67 = tmp66 - tmp2 tmp68 = tl_math.exp(tmp67) tmp69 = tmp68 / tmp5 tmp70 = tmp65 * tmp69 tmp71 = tl.broadcast_to(tmp70, [XBLOCK, RBLOCK]) tmp73 = _tmp72 + tmp71 _tmp72 = tl.where(rmask & xmask, tmp73, _tmp72) tmp76 = tmp75 - tmp2 tmp77 = tl_math.exp(tmp76) tmp78 = tmp77 / tmp5 tmp79 = tmp74 * tmp78 tmp80 = tl.broadcast_to(tmp79, [XBLOCK, RBLOCK]) tmp82 = _tmp81 + tmp80 _tmp81 = tl.where(rmask & xmask, tmp82, _tmp81) tmp85 = tmp84 - tmp2 tmp86 = tl_math.exp(tmp85) tmp87 = tmp86 / tmp5 tmp88 = tmp83 * tmp87 tmp89 = tl.broadcast_to(tmp88, [XBLOCK, RBLOCK]) tmp91 = _tmp90 + tmp89 _tmp90 = tl.where(rmask & xmask, tmp91, _tmp90) tmp94 = tmp93 - tmp2 tmp95 = tl_math.exp(tmp94) tmp96 = tmp95 / tmp5 tmp97 = tmp92 * tmp96 tmp98 = tl.broadcast_to(tmp97, [XBLOCK, RBLOCK]) tmp100 = _tmp99 + tmp98 _tmp99 = tl.where(rmask & xmask, tmp100, _tmp99) tmp103 = tmp102 - tmp2 tmp104 = tl_math.exp(tmp103) tmp105 = tmp104 / tmp5 tmp106 = tmp101 * tmp105 tmp107 = tl.broadcast_to(tmp106, [XBLOCK, RBLOCK]) tmp109 = _tmp108 + tmp107 _tmp108 = tl.where(rmask & xmask, tmp109, _tmp108) tmp112 = tmp111 - tmp2 tmp113 = tl_math.exp(tmp112) tmp114 = tmp113 / tmp5 tmp115 = tmp110 * tmp114 tmp116 = tl.broadcast_to(tmp115, [XBLOCK, RBLOCK]) tmp118 = _tmp117 + tmp116 _tmp117 = tl.where(rmask & xmask, tmp118, _tmp117) tmp121 = tmp120 - tmp2 tmp122 = tl_math.exp(tmp121) tmp123 = tmp122 / tmp5 tmp124 = tmp119 * tmp123 tmp125 = tl.broadcast_to(tmp124, 
[XBLOCK, RBLOCK]) tmp127 = _tmp126 + tmp125 _tmp126 = tl.where(rmask & xmask, tmp127, _tmp126) tmp130 = tmp129 - tmp2 tmp131 = tl_math.exp(tmp130) tmp132 = tmp131 / tmp5 tmp133 = tmp128 * tmp132 tmp134 = tl.broadcast_to(tmp133, [XBLOCK, RBLOCK]) tmp136 = _tmp135 + tmp134 _tmp135 = tl.where(rmask & xmask, tmp136, _tmp135) tmp139 = tmp138 - tmp2 tmp140 = tl_math.exp(tmp139) tmp141 = tmp140 / tmp5 tmp142 = tmp137 * tmp141 tmp143 = tl.broadcast_to(tmp142, [XBLOCK, RBLOCK]) tmp145 = _tmp144 + tmp143 _tmp144 = tl.where(rmask & xmask, tmp145, _tmp144) tmp148 = tmp147 - tmp2 tmp149 = tl_math.exp(tmp148) tmp150 = tmp149 / tmp5 tmp151 = tmp146 * tmp150 tmp152 = tl.broadcast_to(tmp151, [XBLOCK, RBLOCK]) tmp154 = _tmp153 + tmp152 _tmp153 = tl.where(rmask & xmask, tmp154, _tmp153) tmp157 = tmp156 - tmp2 tmp158 = tl_math.exp(tmp157) tmp159 = tmp158 / tmp5 tmp160 = tmp155 * tmp159 tmp161 = tl.broadcast_to(tmp160, [XBLOCK, RBLOCK]) tmp163 = _tmp162 + tmp161 _tmp162 = tl.where(rmask & xmask, tmp163, _tmp162) tmp166 = tmp165 - tmp2 tmp167 = tl_math.exp(tmp166) tmp168 = tmp167 / tmp5 tmp169 = tmp164 * tmp168 tmp170 = tl.broadcast_to(tmp169, [XBLOCK, RBLOCK]) tmp172 = _tmp171 + tmp170 _tmp171 = tl.where(rmask & xmask, tmp172, _tmp171) tmp175 = tmp174 - tmp2 tmp176 = tl_math.exp(tmp175) tmp177 = tmp176 / tmp5 tmp178 = tmp173 * tmp177 tmp179 = tl.broadcast_to(tmp178, [XBLOCK, RBLOCK]) tmp181 = _tmp180 + tmp179 _tmp180 = tl.where(rmask & xmask, tmp181, _tmp180) tmp184 = tmp183 - tmp2 tmp185 = tl_math.exp(tmp184) tmp186 = tmp185 / tmp5 tmp187 = tmp182 * tmp186 tmp188 = tl.broadcast_to(tmp187, [XBLOCK, RBLOCK]) tmp190 = _tmp189 + tmp188 _tmp189 = tl.where(rmask & xmask, tmp190, _tmp189) tmp193 = tmp192 - tmp2 tmp194 = tl_math.exp(tmp193) tmp195 = tmp194 / tmp5 tmp196 = tmp191 * tmp195 tmp197 = tl.broadcast_to(tmp196, [XBLOCK, RBLOCK]) tmp199 = _tmp198 + tmp197 _tmp198 = tl.where(rmask & xmask, tmp199, _tmp198) tmp202 = tmp201 - tmp2 tmp203 = tl_math.exp(tmp202) tmp204 = tmp203 / tmp5 tmp205 = tmp200 * tmp204 tmp206 = tl.broadcast_to(tmp205, [XBLOCK, RBLOCK]) tmp208 = _tmp207 + tmp206 _tmp207 = tl.where(rmask & xmask, tmp208, _tmp207) tmp211 = tmp210 - tmp2 tmp212 = tl_math.exp(tmp211) tmp213 = tmp212 / tmp5 tmp214 = tmp209 * tmp213 tmp215 = tl.broadcast_to(tmp214, [XBLOCK, RBLOCK]) tmp217 = _tmp216 + tmp215 _tmp216 = tl.where(rmask & xmask, tmp217, _tmp216) tmp220 = tmp219 - tmp2 tmp221 = tl_math.exp(tmp220) tmp222 = tmp221 / tmp5 tmp223 = tmp218 * tmp222 tmp224 = tl.broadcast_to(tmp223, [XBLOCK, RBLOCK]) tmp226 = _tmp225 + tmp224 _tmp225 = tl.where(rmask & xmask, tmp226, _tmp225) tmp229 = tmp228 - tmp2 tmp230 = tl_math.exp(tmp229) tmp231 = tmp230 / tmp5 tmp232 = tmp227 * tmp231 tmp233 = tl.broadcast_to(tmp232, [XBLOCK, RBLOCK]) tmp235 = _tmp234 + tmp233 _tmp234 = tl.where(rmask & xmask, tmp235, _tmp234) tmp238 = tmp237 - tmp2 tmp239 = tl_math.exp(tmp238) tmp240 = tmp239 / tmp5 tmp241 = tmp236 * tmp240 tmp242 = tl.broadcast_to(tmp241, [XBLOCK, RBLOCK]) tmp244 = _tmp243 + tmp242 _tmp243 = tl.where(rmask & xmask, tmp244, _tmp243) tmp247 = tmp246 - tmp2 tmp248 = tl_math.exp(tmp247) tmp249 = tmp248 / tmp5 tmp250 = tmp245 * tmp249 tmp251 = tl.broadcast_to(tmp250, [XBLOCK, RBLOCK]) tmp253 = _tmp252 + tmp251 _tmp252 = tl.where(rmask & xmask, tmp253, _tmp252) tmp9 = tl.sum(_tmp9, 1)[:, None] tl.store(out_ptr0 + x3, tmp9, xmask) tmp18 = tl.sum(_tmp18, 1)[:, None] tl.store(out_ptr1 + x3, tmp18, xmask) tmp27 = tl.sum(_tmp27, 1)[:, None] tl.store(out_ptr2 + x3, tmp27, xmask) tmp36 = tl.sum(_tmp36, 1)[:, None] 
tl.store(out_ptr3 + x3, tmp36, xmask) tmp45 = tl.sum(_tmp45, 1)[:, None] tl.store(out_ptr4 + x3, tmp45, xmask) tmp54 = tl.sum(_tmp54, 1)[:, None] tl.store(out_ptr5 + x3, tmp54, xmask) tmp63 = tl.sum(_tmp63, 1)[:, None] tl.store(out_ptr6 + x3, tmp63, xmask) tmp72 = tl.sum(_tmp72, 1)[:, None] tl.store(out_ptr7 + x3, tmp72, xmask) tmp81 = tl.sum(_tmp81, 1)[:, None] tl.store(out_ptr8 + x3, tmp81, xmask) tmp90 = tl.sum(_tmp90, 1)[:, None] tl.store(out_ptr9 + x3, tmp90, xmask) tmp99 = tl.sum(_tmp99, 1)[:, None] tl.store(out_ptr10 + x3, tmp99, xmask) tmp108 = tl.sum(_tmp108, 1)[:, None] tl.store(out_ptr11 + x3, tmp108, xmask) tmp117 = tl.sum(_tmp117, 1)[:, None] tl.store(out_ptr12 + x3, tmp117, xmask) tmp126 = tl.sum(_tmp126, 1)[:, None] tl.store(out_ptr13 + x3, tmp126, xmask) tmp135 = tl.sum(_tmp135, 1)[:, None] tl.store(out_ptr14 + x3, tmp135, xmask) tmp144 = tl.sum(_tmp144, 1)[:, None] tl.store(out_ptr15 + x3, tmp144, xmask) tmp153 = tl.sum(_tmp153, 1)[:, None] tl.store(out_ptr16 + x3, tmp153, xmask) tmp162 = tl.sum(_tmp162, 1)[:, None] tl.store(out_ptr17 + x3, tmp162, xmask) tmp171 = tl.sum(_tmp171, 1)[:, None] tl.store(out_ptr18 + x3, tmp171, xmask) tmp180 = tl.sum(_tmp180, 1)[:, None] tl.store(out_ptr19 + x3, tmp180, xmask) tmp189 = tl.sum(_tmp189, 1)[:, None] tl.store(out_ptr20 + x3, tmp189, xmask) tmp198 = tl.sum(_tmp198, 1)[:, None] tl.store(out_ptr21 + x3, tmp198, xmask) tmp207 = tl.sum(_tmp207, 1)[:, None] tl.store(out_ptr22 + x3, tmp207, xmask) tmp216 = tl.sum(_tmp216, 1)[:, None] tl.store(out_ptr23 + x3, tmp216, xmask) tmp225 = tl.sum(_tmp225, 1)[:, None] tl.store(out_ptr24 + x3, tmp225, xmask) tmp234 = tl.sum(_tmp234, 1)[:, None] tl.store(out_ptr25 + x3, tmp234, xmask) tmp243 = tl.sum(_tmp243, 1)[:, None] tl.store(out_ptr26 + x3, tmp243, xmask) tmp252 = tl.sum(_tmp252, 1)[:, None] tl.store(out_ptr27 + x3, tmp252, xmask) @triton.jit def triton_red_fused_mul_sum_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 512 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x1 = xindex // 128 _tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr1 + (233472 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr2 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tl.load(in_ptr3 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr4 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp12 = tl.load(in_ptr1 + (237568 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr5 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp21 = tl.load(in_ptr1 + (241664 + r2 + 262144 * x1), 
rmask & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tl.load(in_ptr6 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp30 = tl.load(in_ptr1 + (245760 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp38 = tl.load(in_ptr7 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp39 = tl.load(in_ptr1 + (249856 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tl.load(in_ptr8 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp48 = tl.load(in_ptr1 + (253952 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp56 = tl.load(in_ptr9 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp57 = tl.load(in_ptr1 + (258048 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp3 = tmp1 - tmp2 tmp4 = tl_math.exp(tmp3) tmp6 = tmp4 / tmp5 tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = _tmp9 + tmp8 _tmp9 = tl.where(rmask & xmask, tmp10, _tmp9) tmp13 = tmp12 - tmp2 tmp14 = tl_math.exp(tmp13) tmp15 = tmp14 / tmp5 tmp16 = tmp11 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = _tmp18 + tmp17 _tmp18 = tl.where(rmask & xmask, tmp19, _tmp18) tmp22 = tmp21 - tmp2 tmp23 = tl_math.exp(tmp22) tmp24 = tmp23 / tmp5 tmp25 = tmp20 * tmp24 tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = _tmp27 + tmp26 _tmp27 = tl.where(rmask & xmask, tmp28, _tmp27) tmp31 = tmp30 - tmp2 tmp32 = tl_math.exp(tmp31) tmp33 = tmp32 / tmp5 tmp34 = tmp29 * tmp33 tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = _tmp36 + tmp35 _tmp36 = tl.where(rmask & xmask, tmp37, _tmp36) tmp40 = tmp39 - tmp2 tmp41 = tl_math.exp(tmp40) tmp42 = tmp41 / tmp5 tmp43 = tmp38 * tmp42 tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK]) tmp46 = _tmp45 + tmp44 _tmp45 = tl.where(rmask & xmask, tmp46, _tmp45) tmp49 = tmp48 - tmp2 tmp50 = tl_math.exp(tmp49) tmp51 = tmp50 / tmp5 tmp52 = tmp47 * tmp51 tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK]) tmp55 = _tmp54 + tmp53 _tmp54 = tl.where(rmask & xmask, tmp55, _tmp54) tmp58 = tmp57 - tmp2 tmp59 = tl_math.exp(tmp58) tmp60 = tmp59 / tmp5 tmp61 = tmp56 * tmp60 tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK]) tmp64 = _tmp63 + tmp62 _tmp63 = tl.where(rmask & xmask, tmp64, _tmp63) tmp9 = tl.sum(_tmp9, 1)[:, None] tl.store(out_ptr0 + x3, tmp9, xmask) tmp18 = tl.sum(_tmp18, 1)[:, None] tl.store(out_ptr1 + x3, tmp18, xmask) tmp27 = tl.sum(_tmp27, 1)[:, None] tl.store(out_ptr2 + x3, tmp27, xmask) tmp36 = tl.sum(_tmp36, 1)[:, None] tl.store(out_ptr3 + x3, tmp36, xmask) tmp45 = tl.sum(_tmp45, 1)[:, None] tl.store(out_ptr4 + x3, tmp45, xmask) tmp54 = tl.sum(_tmp54, 1)[:, None] tl.store(out_ptr5 + x3, tmp54, xmask) tmp63 = tl.sum(_tmp63, 1)[:, None] tl.store(out_ptr6 + x3, tmp63, xmask) @triton.jit def triton_per_fused_copy_linalg_vector_norm_zeros_6(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, in_ptr32, in_ptr33, in_ptr34, in_ptr35, in_ptr36, in_ptr37, in_ptr38, in_ptr39, in_ptr40, in_ptr41, in_ptr42, in_ptr43, in_ptr44, in_ptr45, in_ptr46, in_ptr47, in_ptr48, in_ptr49, in_ptr50, in_ptr51, in_ptr52, in_ptr53, in_ptr54, in_ptr55, in_ptr56, in_ptr57, in_ptr58, in_ptr59, 
in_ptr60, in_ptr61, in_ptr62, in_ptr63, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 256 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex % 64 r2 = rindex x1 = xindex // 64 x3 = xindex tmp0 = x0 tmp1 = tl.full([1, 1], 4, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1, 1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (r2 + 128 * x1), tmp5 & xmask, eviction_policy ='evict_last', other=0.0) tmp7 = tl.full([1, 1], 3, tl.int64) tmp8 = tmp0 >= tmp7 tmp9 = tmp0 < tmp1 tmp10 = tmp8 & tmp9 tmp11 = tl.load(in_ptr1 + (r2 + 128 * x1), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.full([1, 1], 2, tl.int64) tmp13 = tmp0 >= tmp12 tmp14 = tmp0 < tmp7 tmp15 = tmp13 & tmp14 tmp16 = tl.load(in_ptr2 + (r2 + 128 * x1), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tl.full([1, 1], 1, tl.int64) tmp18 = tmp0 >= tmp17 tmp19 = tmp0 < tmp12 tmp20 = tmp18 & tmp19 tmp21 = tl.load(in_ptr3 + (r2 + 128 * x1), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tmp0 < tmp17 tmp23 = tl.load(in_ptr4 + (r2 + 128 * x1), tmp22 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = 0.0 tmp25 = tl.where(tmp22, tmp23, tmp24) tmp26 = tl.where(tmp20, tmp21, tmp25) tmp27 = tl.where(tmp15, tmp16, tmp26) tmp28 = tl.where(tmp10, tmp11, tmp27) tmp29 = tl.where(tmp5, tmp6, tmp28) tmp30 = tl.full([1, 1], 8, tl.int64) tmp31 = tmp0 >= tmp30 tmp32 = tl.full([1, 1], 9, tl.int64) tmp33 = tmp0 < tmp32 tmp34 = tmp31 & tmp33 tmp35 = tl.load(in_ptr5 + (r2 + 128 * x1), tmp34 & xmask, eviction_policy='evict_last', other=0.0) tmp36 = tl.full([1, 1], 7, tl.int64) tmp37 = tmp0 >= tmp36 tmp38 = tmp0 < tmp30 tmp39 = tmp37 & tmp38 tmp40 = tl.load(in_ptr6 + (r2 + 128 * x1), tmp39 & xmask, eviction_policy='evict_last', other=0.0) tmp41 = tl.full([1, 1], 6, tl.int64) tmp42 = tmp0 >= tmp41 tmp43 = tmp0 < tmp36 tmp44 = tmp42 & tmp43 tmp45 = tl.load(in_ptr7 + (r2 + 128 * x1), tmp44 & xmask, eviction_policy='evict_last', other=0.0) tmp46 = tmp0 >= tmp3 tmp47 = tmp0 < tmp41 tmp48 = tmp46 & tmp47 tmp49 = tl.load(in_ptr8 + (r2 + 128 * x1), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.where(tmp48, tmp49, tmp29) tmp51 = tl.where(tmp44, tmp45, tmp50) tmp52 = tl.where(tmp39, tmp40, tmp51) tmp53 = tl.where(tmp34, tmp35, tmp52) tmp54 = tl.full([1, 1], 12, tl.int64) tmp55 = tmp0 >= tmp54 tmp56 = tl.full([1, 1], 13, tl.int64) tmp57 = tmp0 < tmp56 tmp58 = tmp55 & tmp57 tmp59 = tl.load(in_ptr9 + (r2 + 128 * x1), tmp58 & xmask, eviction_policy='evict_last', other=0.0) tmp60 = tl.full([1, 1], 11, tl.int64) tmp61 = tmp0 >= tmp60 tmp62 = tmp0 < tmp54 tmp63 = tmp61 & tmp62 tmp64 = tl.load(in_ptr10 + (r2 + 128 * x1), tmp63 & xmask, eviction_policy='evict_last', other=0.0) tmp65 = tl.full([1, 1], 10, tl.int64) tmp66 = tmp0 >= tmp65 tmp67 = tmp0 < tmp60 tmp68 = tmp66 & tmp67 tmp69 = tl.load(in_ptr11 + (r2 + 128 * x1), tmp68 & xmask, eviction_policy='evict_last', other=0.0) tmp70 = tmp0 >= tmp32 tmp71 = tmp0 < tmp65 tmp72 = tmp70 & tmp71 tmp73 = tl.load(in_ptr12 + (r2 + 128 * x1), tmp72 & xmask, eviction_policy='evict_last', other=0.0) tmp74 = tl.where(tmp72, tmp73, tmp53) tmp75 = tl.where(tmp68, tmp69, tmp74) tmp76 = tl.where(tmp63, tmp64, tmp75) tmp77 = tl.where(tmp58, tmp59, tmp76) tmp78 = tl.full([1, 1], 16, tl.int64) tmp79 = tmp0 >= tmp78 tmp80 = tl.full([1, 1], 17, tl.int64) tmp81 = tmp0 < 
tmp80 tmp82 = tmp79 & tmp81 tmp83 = tl.load(in_ptr13 + (r2 + 128 * x1), tmp82 & xmask, eviction_policy='evict_last', other=0.0) tmp84 = tl.full([1, 1], 15, tl.int64) tmp85 = tmp0 >= tmp84 tmp86 = tmp0 < tmp78 tmp87 = tmp85 & tmp86 tmp88 = tl.load(in_ptr14 + (r2 + 128 * x1), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full([1, 1], 14, tl.int64) tmp90 = tmp0 >= tmp89 tmp91 = tmp0 < tmp84 tmp92 = tmp90 & tmp91 tmp93 = tl.load(in_ptr15 + (r2 + 128 * x1), tmp92 & xmask, eviction_policy='evict_last', other=0.0) tmp94 = tmp0 >= tmp56 tmp95 = tmp0 < tmp89 tmp96 = tmp94 & tmp95 tmp97 = tl.load(in_ptr16 + (r2 + 128 * x1), tmp96 & xmask, eviction_policy='evict_last', other=0.0) tmp98 = tl.where(tmp96, tmp97, tmp77) tmp99 = tl.where(tmp92, tmp93, tmp98) tmp100 = tl.where(tmp87, tmp88, tmp99) tmp101 = tl.where(tmp82, tmp83, tmp100) tmp102 = tl.full([1, 1], 20, tl.int64) tmp103 = tmp0 >= tmp102 tmp104 = tl.full([1, 1], 21, tl.int64) tmp105 = tmp0 < tmp104 tmp106 = tmp103 & tmp105 tmp107 = tl.load(in_ptr17 + (r2 + 128 * x1), tmp106 & xmask, eviction_policy='evict_last', other=0.0) tmp108 = tl.full([1, 1], 19, tl.int64) tmp109 = tmp0 >= tmp108 tmp110 = tmp0 < tmp102 tmp111 = tmp109 & tmp110 tmp112 = tl.load(in_ptr18 + (r2 + 128 * x1), tmp111 & xmask, eviction_policy='evict_last', other=0.0) tmp113 = tl.full([1, 1], 18, tl.int64) tmp114 = tmp0 >= tmp113 tmp115 = tmp0 < tmp108 tmp116 = tmp114 & tmp115 tmp117 = tl.load(in_ptr19 + (r2 + 128 * x1), tmp116 & xmask, eviction_policy='evict_last', other=0.0) tmp118 = tmp0 >= tmp80 tmp119 = tmp0 < tmp113 tmp120 = tmp118 & tmp119 tmp121 = tl.load(in_ptr20 + (r2 + 128 * x1), tmp120 & xmask, eviction_policy='evict_last', other=0.0) tmp122 = tl.where(tmp120, tmp121, tmp101) tmp123 = tl.where(tmp116, tmp117, tmp122) tmp124 = tl.where(tmp111, tmp112, tmp123) tmp125 = tl.where(tmp106, tmp107, tmp124) tmp126 = tl.full([1, 1], 24, tl.int64) tmp127 = tmp0 >= tmp126 tmp128 = tl.full([1, 1], 25, tl.int64) tmp129 = tmp0 < tmp128 tmp130 = tmp127 & tmp129 tmp131 = tl.load(in_ptr21 + (r2 + 128 * x1), tmp130 & xmask, eviction_policy='evict_last', other=0.0) tmp132 = tl.full([1, 1], 23, tl.int64) tmp133 = tmp0 >= tmp132 tmp134 = tmp0 < tmp126 tmp135 = tmp133 & tmp134 tmp136 = tl.load(in_ptr22 + (r2 + 128 * x1), tmp135 & xmask, eviction_policy='evict_last', other=0.0) tmp137 = tl.full([1, 1], 22, tl.int64) tmp138 = tmp0 >= tmp137 tmp139 = tmp0 < tmp132 tmp140 = tmp138 & tmp139 tmp141 = tl.load(in_ptr23 + (r2 + 128 * x1), tmp140 & xmask, eviction_policy='evict_last', other=0.0) tmp142 = tmp0 >= tmp104 tmp143 = tmp0 < tmp137 tmp144 = tmp142 & tmp143 tmp145 = tl.load(in_ptr24 + (r2 + 128 * x1), tmp144 & xmask, eviction_policy='evict_last', other=0.0) tmp146 = tl.where(tmp144, tmp145, tmp125) tmp147 = tl.where(tmp140, tmp141, tmp146) tmp148 = tl.where(tmp135, tmp136, tmp147) tmp149 = tl.where(tmp130, tmp131, tmp148) tmp150 = tl.full([1, 1], 28, tl.int64) tmp151 = tmp0 >= tmp150 tmp152 = tl.full([1, 1], 29, tl.int64) tmp153 = tmp0 < tmp152 tmp154 = tmp151 & tmp153 tmp155 = tl.load(in_ptr25 + (r2 + 128 * x1), tmp154 & xmask, eviction_policy='evict_last', other=0.0) tmp156 = tl.full([1, 1], 27, tl.int64) tmp157 = tmp0 >= tmp156 tmp158 = tmp0 < tmp150 tmp159 = tmp157 & tmp158 tmp160 = tl.load(in_ptr26 + (r2 + 128 * x1), tmp159 & xmask, eviction_policy='evict_last', other=0.0) tmp161 = tl.full([1, 1], 26, tl.int64) tmp162 = tmp0 >= tmp161 tmp163 = tmp0 < tmp156 tmp164 = tmp162 & tmp163 tmp165 = tl.load(in_ptr27 + (r2 + 128 * x1), tmp164 & xmask, 
eviction_policy='evict_last', other=0.0) tmp166 = tmp0 >= tmp128 tmp167 = tmp0 < tmp161 tmp168 = tmp166 & tmp167 tmp169 = tl.load(in_ptr28 + (r2 + 128 * x1), tmp168 & xmask, eviction_policy='evict_last', other=0.0) tmp170 = tl.where(tmp168, tmp169, tmp149) tmp171 = tl.where(tmp164, tmp165, tmp170) tmp172 = tl.where(tmp159, tmp160, tmp171) tmp173 = tl.where(tmp154, tmp155, tmp172) tmp174 = tl.full([1, 1], 32, tl.int64) tmp175 = tmp0 >= tmp174 tmp176 = tl.full([1, 1], 33, tl.int64) tmp177 = tmp0 < tmp176 tmp178 = tmp175 & tmp177 tmp179 = tl.load(in_ptr29 + (r2 + 128 * x1), tmp178 & xmask, eviction_policy='evict_last', other=0.0) tmp180 = tl.full([1, 1], 31, tl.int64) tmp181 = tmp0 >= tmp180 tmp182 = tmp0 < tmp174 tmp183 = tmp181 & tmp182 tmp184 = tl.load(in_ptr30 + (r2 + 128 * x1), tmp183 & xmask, eviction_policy='evict_last', other=0.0) tmp185 = tl.full([1, 1], 30, tl.int64) tmp186 = tmp0 >= tmp185 tmp187 = tmp0 < tmp180 tmp188 = tmp186 & tmp187 tmp189 = tl.load(in_ptr31 + (r2 + 128 * x1), tmp188 & xmask, eviction_policy='evict_last', other=0.0) tmp190 = tmp0 >= tmp152 tmp191 = tmp0 < tmp185 tmp192 = tmp190 & tmp191 tmp193 = tl.load(in_ptr32 + (r2 + 128 * x1), tmp192 & xmask, eviction_policy='evict_last', other=0.0) tmp194 = tl.where(tmp192, tmp193, tmp173) tmp195 = tl.where(tmp188, tmp189, tmp194) tmp196 = tl.where(tmp183, tmp184, tmp195) tmp197 = tl.where(tmp178, tmp179, tmp196) tmp198 = tl.full([1, 1], 36, tl.int64) tmp199 = tmp0 >= tmp198 tmp200 = tl.full([1, 1], 37, tl.int64) tmp201 = tmp0 < tmp200 tmp202 = tmp199 & tmp201 tmp203 = tl.load(in_ptr33 + (r2 + 128 * x1), tmp202 & xmask, eviction_policy='evict_last', other=0.0) tmp204 = tl.full([1, 1], 35, tl.int64) tmp205 = tmp0 >= tmp204 tmp206 = tmp0 < tmp198 tmp207 = tmp205 & tmp206 tmp208 = tl.load(in_ptr34 + (r2 + 128 * x1), tmp207 & xmask, eviction_policy='evict_last', other=0.0) tmp209 = tl.full([1, 1], 34, tl.int64) tmp210 = tmp0 >= tmp209 tmp211 = tmp0 < tmp204 tmp212 = tmp210 & tmp211 tmp213 = tl.load(in_ptr35 + (r2 + 128 * x1), tmp212 & xmask, eviction_policy='evict_last', other=0.0) tmp214 = tmp0 >= tmp176 tmp215 = tmp0 < tmp209 tmp216 = tmp214 & tmp215 tmp217 = tl.load(in_ptr36 + (r2 + 128 * x1), tmp216 & xmask, eviction_policy='evict_last', other=0.0) tmp218 = tl.where(tmp216, tmp217, tmp197) tmp219 = tl.where(tmp212, tmp213, tmp218) tmp220 = tl.where(tmp207, tmp208, tmp219) tmp221 = tl.where(tmp202, tmp203, tmp220) tmp222 = tl.full([1, 1], 40, tl.int64) tmp223 = tmp0 >= tmp222 tmp224 = tl.full([1, 1], 41, tl.int64) tmp225 = tmp0 < tmp224 tmp226 = tmp223 & tmp225 tmp227 = tl.load(in_ptr37 + (r2 + 128 * x1), tmp226 & xmask, eviction_policy='evict_last', other=0.0) tmp228 = tl.full([1, 1], 39, tl.int64) tmp229 = tmp0 >= tmp228 tmp230 = tmp0 < tmp222 tmp231 = tmp229 & tmp230 tmp232 = tl.load(in_ptr38 + (r2 + 128 * x1), tmp231 & xmask, eviction_policy='evict_last', other=0.0) tmp233 = tl.full([1, 1], 38, tl.int64) tmp234 = tmp0 >= tmp233 tmp235 = tmp0 < tmp228 tmp236 = tmp234 & tmp235 tmp237 = tl.load(in_ptr39 + (r2 + 128 * x1), tmp236 & xmask, eviction_policy='evict_last', other=0.0) tmp238 = tmp0 >= tmp200 tmp239 = tmp0 < tmp233 tmp240 = tmp238 & tmp239 tmp241 = tl.load(in_ptr40 + (r2 + 128 * x1), tmp240 & xmask, eviction_policy='evict_last', other=0.0) tmp242 = tl.where(tmp240, tmp241, tmp221) tmp243 = tl.where(tmp236, tmp237, tmp242) tmp244 = tl.where(tmp231, tmp232, tmp243) tmp245 = tl.where(tmp226, tmp227, tmp244) tmp246 = tl.full([1, 1], 44, tl.int64) tmp247 = tmp0 >= tmp246 tmp248 = tl.full([1, 1], 45, tl.int64) tmp249 = 
tmp0 < tmp248 tmp250 = tmp247 & tmp249 tmp251 = tl.load(in_ptr41 + (r2 + 128 * x1), tmp250 & xmask, eviction_policy='evict_last', other=0.0) tmp252 = tl.full([1, 1], 43, tl.int64) tmp253 = tmp0 >= tmp252 tmp254 = tmp0 < tmp246 tmp255 = tmp253 & tmp254 tmp256 = tl.load(in_ptr42 + (r2 + 128 * x1), tmp255 & xmask, eviction_policy='evict_last', other=0.0) tmp257 = tl.full([1, 1], 42, tl.int64) tmp258 = tmp0 >= tmp257 tmp259 = tmp0 < tmp252 tmp260 = tmp258 & tmp259 tmp261 = tl.load(in_ptr43 + (r2 + 128 * x1), tmp260 & xmask, eviction_policy='evict_last', other=0.0) tmp262 = tmp0 >= tmp224 tmp263 = tmp0 < tmp257 tmp264 = tmp262 & tmp263 tmp265 = tl.load(in_ptr44 + (r2 + 128 * x1), tmp264 & xmask, eviction_policy='evict_last', other=0.0) tmp266 = tl.where(tmp264, tmp265, tmp245) tmp267 = tl.where(tmp260, tmp261, tmp266) tmp268 = tl.where(tmp255, tmp256, tmp267) tmp269 = tl.where(tmp250, tmp251, tmp268) tmp270 = tl.full([1, 1], 48, tl.int64) tmp271 = tmp0 >= tmp270 tmp272 = tl.full([1, 1], 49, tl.int64) tmp273 = tmp0 < tmp272 tmp274 = tmp271 & tmp273 tmp275 = tl.load(in_ptr45 + (r2 + 128 * x1), tmp274 & xmask, eviction_policy='evict_last', other=0.0) tmp276 = tl.full([1, 1], 47, tl.int64) tmp277 = tmp0 >= tmp276 tmp278 = tmp0 < tmp270 tmp279 = tmp277 & tmp278 tmp280 = tl.load(in_ptr46 + (r2 + 128 * x1), tmp279 & xmask, eviction_policy='evict_last', other=0.0) tmp281 = tl.full([1, 1], 46, tl.int64) tmp282 = tmp0 >= tmp281 tmp283 = tmp0 < tmp276 tmp284 = tmp282 & tmp283 tmp285 = tl.load(in_ptr47 + (r2 + 128 * x1), tmp284 & xmask, eviction_policy='evict_last', other=0.0) tmp286 = tmp0 >= tmp248 tmp287 = tmp0 < tmp281 tmp288 = tmp286 & tmp287 tmp289 = tl.load(in_ptr48 + (r2 + 128 * x1), tmp288 & xmask, eviction_policy='evict_last', other=0.0) tmp290 = tl.where(tmp288, tmp289, tmp269) tmp291 = tl.where(tmp284, tmp285, tmp290) tmp292 = tl.where(tmp279, tmp280, tmp291) tmp293 = tl.where(tmp274, tmp275, tmp292) tmp294 = tl.full([1, 1], 52, tl.int64) tmp295 = tmp0 >= tmp294 tmp296 = tl.full([1, 1], 53, tl.int64) tmp297 = tmp0 < tmp296 tmp298 = tmp295 & tmp297 tmp299 = tl.load(in_ptr49 + (r2 + 128 * x1), tmp298 & xmask, eviction_policy='evict_last', other=0.0) tmp300 = tl.full([1, 1], 51, tl.int64) tmp301 = tmp0 >= tmp300 tmp302 = tmp0 < tmp294 tmp303 = tmp301 & tmp302 tmp304 = tl.load(in_ptr50 + (r2 + 128 * x1), tmp303 & xmask, eviction_policy='evict_last', other=0.0) tmp305 = tl.full([1, 1], 50, tl.int64) tmp306 = tmp0 >= tmp305 tmp307 = tmp0 < tmp300 tmp308 = tmp306 & tmp307 tmp309 = tl.load(in_ptr51 + (r2 + 128 * x1), tmp308 & xmask, eviction_policy='evict_last', other=0.0) tmp310 = tmp0 >= tmp272 tmp311 = tmp0 < tmp305 tmp312 = tmp310 & tmp311 tmp313 = tl.load(in_ptr52 + (r2 + 128 * x1), tmp312 & xmask, eviction_policy='evict_last', other=0.0) tmp314 = tl.where(tmp312, tmp313, tmp293) tmp315 = tl.where(tmp308, tmp309, tmp314) tmp316 = tl.where(tmp303, tmp304, tmp315) tmp317 = tl.where(tmp298, tmp299, tmp316) tmp318 = tl.full([1, 1], 56, tl.int64) tmp319 = tmp0 >= tmp318 tmp320 = tl.full([1, 1], 57, tl.int64) tmp321 = tmp0 < tmp320 tmp322 = tmp319 & tmp321 tmp323 = tl.load(in_ptr53 + (r2 + 128 * x1), tmp322 & xmask, eviction_policy='evict_last', other=0.0) tmp324 = tl.full([1, 1], 55, tl.int64) tmp325 = tmp0 >= tmp324 tmp326 = tmp0 < tmp318 tmp327 = tmp325 & tmp326 tmp328 = tl.load(in_ptr54 + (r2 + 128 * x1), tmp327 & xmask, eviction_policy='evict_last', other=0.0) tmp329 = tl.full([1, 1], 54, tl.int64) tmp330 = tmp0 >= tmp329 tmp331 = tmp0 < tmp324 tmp332 = tmp330 & tmp331 tmp333 = tl.load(in_ptr55 + 
(r2 + 128 * x1), tmp332 & xmask, eviction_policy='evict_last', other=0.0) tmp334 = tmp0 >= tmp296 tmp335 = tmp0 < tmp329 tmp336 = tmp334 & tmp335 tmp337 = tl.load(in_ptr56 + (r2 + 128 * x1), tmp336 & xmask, eviction_policy='evict_last', other=0.0) tmp338 = tl.where(tmp336, tmp337, tmp317) tmp339 = tl.where(tmp332, tmp333, tmp338) tmp340 = tl.where(tmp327, tmp328, tmp339) tmp341 = tl.where(tmp322, tmp323, tmp340) tmp342 = tl.full([1, 1], 60, tl.int64) tmp343 = tmp0 >= tmp342 tmp344 = tl.full([1, 1], 61, tl.int64) tmp345 = tmp0 < tmp344 tmp346 = tmp343 & tmp345 tmp347 = tl.load(in_ptr57 + (r2 + 128 * x1), tmp346 & xmask, eviction_policy='evict_last', other=0.0) tmp348 = tl.full([1, 1], 59, tl.int64) tmp349 = tmp0 >= tmp348 tmp350 = tmp0 < tmp342 tmp351 = tmp349 & tmp350 tmp352 = tl.load(in_ptr58 + (r2 + 128 * x1), tmp351 & xmask, eviction_policy='evict_last', other=0.0) tmp353 = tl.full([1, 1], 58, tl.int64) tmp354 = tmp0 >= tmp353 tmp355 = tmp0 < tmp348 tmp356 = tmp354 & tmp355 tmp357 = tl.load(in_ptr59 + (r2 + 128 * x1), tmp356 & xmask, eviction_policy='evict_last', other=0.0) tmp358 = tmp0 >= tmp320 tmp359 = tmp0 < tmp353 tmp360 = tmp358 & tmp359 tmp361 = tl.load(in_ptr60 + (r2 + 128 * x1), tmp360 & xmask, eviction_policy='evict_last', other=0.0) tmp362 = tl.where(tmp360, tmp361, tmp341) tmp363 = tl.where(tmp356, tmp357, tmp362) tmp364 = tl.where(tmp351, tmp352, tmp363) tmp365 = tl.where(tmp346, tmp347, tmp364) tmp366 = tl.full([1, 1], 63, tl.int64) tmp367 = tmp0 >= tmp366 tmp368 = tl.load(in_ptr61 + (r2 + 128 * x1), tmp367 & xmask, eviction_policy='evict_last', other=0.0) tmp369 = tl.full([1, 1], 62, tl.int64) tmp370 = tmp0 >= tmp369 tmp371 = tmp0 < tmp366 tmp372 = tmp370 & tmp371 tmp373 = tl.load(in_ptr62 + (r2 + 128 * x1), tmp372 & xmask, eviction_policy='evict_last', other=0.0) tmp374 = tmp0 >= tmp344 tmp375 = tmp0 < tmp369 tmp376 = tmp374 & tmp375 tmp377 = tl.load(in_ptr63 + (r2 + 128 * x1), tmp376 & xmask, eviction_policy='evict_last', other=0.0) tmp378 = tl.where(tmp376, tmp377, tmp365) tmp379 = tl.where(tmp372, tmp373, tmp378) tmp380 = tl.where(tmp367, tmp368, tmp379) tmp381 = tmp380 * tmp380 tmp382 = tl.broadcast_to(tmp381, [XBLOCK, RBLOCK]) tmp384 = tl.where(xmask, tmp382, 0) tmp385 = tl.sum(tmp384, 1)[:, None] tmp386 = libdevice.sqrt(tmp385) tl.store(in_out_ptr0 + (r2 + 128 * x3), tmp380, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp386, xmask) @triton.jit def triton_red_fused_div_linalg_vector_norm_7(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 4 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp7 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + (64 * x0 + r1 // 128), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = 1e-12 tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 / tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = _tmp7 + tmp6 _tmp7 = tl.where(rmask & xmask, tmp8, _tmp7) tmp7 = tl.sum(_tmp7, 1)[:, None] tmp9 = libdevice.sqrt(tmp7) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = 
tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tl.load(in_ptr1 + (64 * x0 + r1 // 128), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp12 = 1e-12 tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp14 = tmp10 / tmp13 tmp15 = triton_helpers.maximum(tmp9, tmp12) tmp16 = tmp14 / tmp15 tl.store(out_ptr0 + (r1 + 8192 * x0), tmp16, rmask & xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 128, 64, 64), (524288, 4096, 64, 1)) assert_size_stride(primals_2, (64, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_3, (64, 128), (128, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32) get_raw_stream(0) triton_red_fused_linalg_vector_norm_0[grid(16384)](primals_1, buf0, 16384, 128, XBLOCK=64, RBLOCK=4, num_warps=8, num_stages=1) buf1 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) buf7 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf9 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf11 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf13 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf16 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf18 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf20 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf22 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf25 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf27 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf29 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf31 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf34 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf36 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf38 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf40 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf43 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf45 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf47 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf49 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf52 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf54 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf56 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf58 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf61 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf63 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf65 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf67 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf70 = empty_strided_cuda((4, 1, 128, 
4096), (524288, 2097152, 4096, 1), torch.float32) buf72 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf74 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf76 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf79 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf81 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf83 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf85 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf88 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf90 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf92 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf94 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf97 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf99 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf101 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf103 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf106 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf108 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf110 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf112 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf115 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf117 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf119 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf121 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf124 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf126 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf128 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf130 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf133 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf135 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf137 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf139 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf142 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf144 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf146 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) triton_poi_fused_div_sub_1[grid(2097152)](primals_1, buf0, primals_3, buf1, buf7, buf9, buf11, buf13, buf16, buf18, buf20, buf22, buf25, buf27, buf29, buf31, buf34, buf36, buf38, buf40, buf43, buf45, buf47, buf49, buf52, buf54, buf56, buf58, buf61, buf63, buf65, buf67, buf70, buf72, buf74, buf76, buf79, buf81, buf83, buf85, buf88, buf90, buf92, buf94, buf97, buf99, buf101, buf103, buf106, buf108, buf110, buf112, buf115, buf117, buf119, buf121, buf124, 
buf126, buf128, buf130, buf133, buf135, buf137, buf139, buf142, buf144, buf146, 2097152, XBLOCK=512, num_warps= 8, num_stages=1) del primals_1 buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = empty_strided_cuda((4, 4096), (4096, 1), torch.int64) buf4 = reinterpret_tensor(buf0, (4, 1, 4096), (4096, 4096, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 1, 4096), (4096, 4096, 1), torch.float32) triton_per_fused__softmax_argmax_2[grid(16384)](buf2, buf3, buf4, buf5, 16384, 64, XBLOCK=8, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf8 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf10 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf12 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf14 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf17 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf19 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf21 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf23 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf26 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf28 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf30 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf32 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf35 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf37 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf39 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf41 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf44 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf46 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf48 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf50 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf53 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf55 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf57 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf59 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf62 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf64 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf66 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf68 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) triton_red_fused_mul_sub_sum_3[grid(512)](buf1, primals_3, buf2, buf4, buf5, buf7, buf9, buf11, buf13, buf16, buf18, buf20, buf22, buf25, buf27, buf29, buf31, buf34, buf36, buf38, buf40, buf43, buf45, buf47, buf49, buf52, buf54, buf56, buf58, buf61, buf63, buf65, buf67, buf6, buf8, buf10, buf12, buf14, buf17, buf19, buf21, buf23, buf26, buf28, buf30, buf32, buf35, buf37, buf39, buf41, buf44, buf46, buf48, buf50, buf53, buf55, buf57, buf59, buf62, buf64, buf66, buf68, 512, 4096, XBLOCK=1, RBLOCK= 2048, num_warps=16, num_stages=1) buf71 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf73 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf75 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf77 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf80 = 
empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf82 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf84 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf86 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf89 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf91 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf93 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf95 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf98 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf100 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf102 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf104 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf107 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf109 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf111 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf113 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf116 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf118 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf120 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf122 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf125 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf127 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf129 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf131 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) triton_red_fused_mul_sum_4[grid(512)](buf70, buf2, buf4, buf5, buf72, buf74, buf76, buf79, buf81, buf83, buf85, buf88, buf90, buf92, buf94, buf97, buf99, buf101, buf103, buf106, buf108, buf110, buf112, buf115, buf117, buf119, buf121, buf124, buf126, buf128, buf130, buf71, buf73, buf75, buf77, buf80, buf82, buf84, buf86, buf89, buf91, buf93, buf95, buf98, buf100, buf102, buf104, buf107, buf109, buf111, buf113, buf116, buf118, buf120, buf122, buf125, buf127, buf129, buf131, 512, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf134 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf136 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf138 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf140 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf143 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf145 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf147 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) triton_red_fused_mul_sum_5[grid(512)](buf133, buf2, buf4, buf5, buf135, buf137, buf139, buf142, buf144, buf146, buf134, buf136, buf138, buf140, buf143, buf145, buf147, 512, 4096, XBLOCK=1, RBLOCK=1024, num_warps=16, num_stages=1) buf15 = empty_strided_cuda((4, 64, 128), (8192, 128, 1), torch.float32) buf24 = buf15 del buf15 buf33 = buf24 del buf24 buf42 = buf33 del buf33 buf51 = buf42 del buf42 buf60 = buf51 del buf51 buf69 = buf60 del buf60 buf78 = buf69 del buf69 buf87 = buf78 del buf78 buf96 = buf87 del buf87 buf105 = buf96 del buf96 buf114 = buf105 del buf105 buf123 = buf114 del buf114 buf132 = buf123 del buf123 buf141 = buf132 del buf132 buf148 = buf141 del buf141 buf149 = empty_strided_cuda((4, 64, 1), (64, 1, 256), torch.float32) buf150 = reinterpret_tensor(buf149, (4, 64, 1), (64, 1, 1), 0) del buf149 
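    # Annotation (added; not inductor-generated): the fused kernel below gathers
    # the 64 per-cluster residual sums sum_i a_k(x_i) * (x_i - c_k), produced by
    # the reduction kernels above, into one (batch, cluster, dim) VLAD tensor
    # (buf148) and computes each (batch, cluster) row's L2 norm (buf150) for
    # NetVLAD's intra-normalization step.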
triton_per_fused_copy_linalg_vector_norm_zeros_6[grid(256)](buf148, buf150, buf14, buf12, buf10, buf8, buf6, buf23, buf21, buf19, buf17, buf32, buf30, buf28, buf26, buf41, buf39, buf37, buf35, buf50, buf48, buf46, buf44, buf59, buf57, buf55, buf53, buf68, buf66, buf64, buf62, buf77, buf75, buf73, buf71, buf86, buf84, buf82, buf80, buf95, buf93, buf91, buf89, buf104, buf102, buf100, buf98, buf113, buf111, buf109, buf107, buf122, buf120, buf118, buf116, buf131, buf129, buf127, buf125, buf140, buf138, buf136, buf134, buf147, buf145, buf143, 256, 128, XBLOCK=8, num_warps=8, num_stages=1) del buf10 del buf100 del buf102 del buf104 del buf107 del buf109 del buf111 del buf113 del buf116 del buf118 del buf12 del buf120 del buf122 del buf125 del buf127 del buf129 del buf131 del buf134 del buf136 del buf138 del buf14 del buf140 del buf143 del buf145 del buf147 del buf17 del buf19 del buf21 del buf23 del buf26 del buf28 del buf30 del buf32 del buf35 del buf37 del buf39 del buf41 del buf44 del buf46 del buf48 del buf50 del buf53 del buf55 del buf57 del buf59 del buf6 del buf62 del buf64 del buf66 del buf68 del buf71 del buf73 del buf75 del buf77 del buf8 del buf80 del buf82 del buf84 del buf86 del buf89 del buf91 del buf93 del buf95 del buf98 buf151 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf152 = reinterpret_tensor(buf151, (4, 1), (1, 1), 0) del buf151 buf153 = empty_strided_cuda((4, 8192), (8192, 1), torch.float32) triton_red_fused_div_linalg_vector_norm_7[grid(4)](buf152, buf148, buf150, buf153, 4, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) return (buf153, buf3, primals_2, buf1, buf2, buf4, buf5, reinterpret_tensor(primals_3, (1, 128), (128, 1), 0), buf7, buf9, buf11, buf13, buf16, buf18, buf20, buf22, buf25, buf27, buf29, buf31, buf34, buf36, buf38, buf40, buf43, buf45, buf47, buf49, buf52, buf54, buf56, buf58, buf61, buf63, buf65, buf67, buf70, buf72, buf74, buf76, buf79, buf81, buf83, buf85, buf88, buf90, buf92, buf94, buf97, buf99, buf101, buf103, buf106, buf108, buf110, buf112, buf115, buf117, buf119, buf121, buf124, buf126, buf128, buf130, buf133, buf135, buf137, buf139, buf142, buf144, buf146, buf148, buf150, buf152) class NetVLADNew(nn.Module): """NetVLAD layer implementation""" def __init__(self, num_clusters=64, dim=128, normalize_input=True, vladv2=False): """ Args: num_clusters : int The number of clusters dim : int Dimension of descriptors alpha : float Parameter of initialization. Larger value is harder assignment. normalize_input : bool If true, descriptor-wise L2 normalization is applied to input. 
vladv2 : bool
            If true, use vladv2 otherwise use vladv1
        """
        super(NetVLADNew, self).__init__()
        self.num_clusters = num_clusters
        self.dim = dim
        self.alpha = 0
        self.vladv2 = vladv2
        self.normalize_input = normalize_input
        self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1),
            bias=vladv2)
        self.centroids = nn.Parameter(torch.rand(num_clusters, dim))

    def init_params(self, clsts, traindescs):
        if self.vladv2 is False:
            clstsAssign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True)
            dots = np.dot(clstsAssign, traindescs.T)
            dots.sort(0)
            dots = dots[::-1, :]
            self.alpha = (-np.log(0.01) / np.mean(dots[0, :] - dots[1, :])
                ).item()
            self.centroids = nn.Parameter(torch.from_numpy(clsts))
            self.conv.weight = nn.Parameter(torch.from_numpy(self.alpha *
                clstsAssign).unsqueeze(2).unsqueeze(3))
            self.conv.bias = None
        else:
            knn = NearestNeighbors(n_jobs=-1)
            knn.fit(traindescs)
            del traindescs
            dsSq = np.square(knn.kneighbors(clsts, 2)[1])
            del knn
            self.alpha = (-np.log(0.01) / np.mean(dsSq[:, 1] - dsSq[:, 0])
                ).item()
            self.centroids = nn.Parameter(torch.from_numpy(clsts))
            del clsts, dsSq
            self.conv.weight = nn.Parameter((2.0 * self.alpha * self.
                centroids).unsqueeze(-1).unsqueeze(-1))
            self.conv.bias = nn.Parameter(-self.alpha * self.centroids.
                norm(dim=1))

    def forward(self, input_0):
        primals_3 = self.centroids
        primals_2 = self.conv.weight
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0], output[1]
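# For reference, the fused kernels in call() above (softmax/argmax assignment,
# the mul_sub_sum residual reductions, then linalg_vector_norm and div) follow
# the standard NetVLAD aggregation. A minimal sketch of that math, where the
# soft assignments a_k come from the 1x1 conv followed by softmax:
#
#   a_k(x_i) = exp(w_k^T x_i + b_k) / sum_{k'} exp(w_{k'}^T x_i + b_{k'})
#   V(j, k)  = sum_i a_k(x_i) * (x_i(j) - c_k(j))
#
# V is then intra-normalized per cluster, flattened, and L2-normalized, which
# is what the final div_linalg_vector_norm kernel computes.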
NikV-JS/DualVPRUtil
NetVLAD
false
8,759
[ "MIT" ]
31
6533e21641faa9156db6e8d95bb5c51cc4b7d377
https://github.com/NikV-JS/DualVPRUtil/tree/6533e21641faa9156db6e8d95bb5c51cc4b7d377
Net1
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/5k/c5knxaihek7fapvcfd2krvazvh5kfwszu4bdb2ebly5gz25i3mjm.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d => convolution # x => expm1, gt, mul, mul_2, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {}) triton_poi_fused_convolution_elu_0 = async_compile.triton('triton_poi_fused_convolution_elu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 115200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 900) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ya/cyatzwbxlw45sv4vwidfqbgxhwzlexlvqv7a2eg6amse2ud6duqn.py # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_1 => expm1_1, gt_1, mul_3, mul_5, where_1 # Graph fragment: # %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {}) # %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 1.0), kwargs = {}) # %expm1_1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_3,), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_1, 1.0), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_3, %mul_5), kwargs = {}) triton_poi_fused_convolution_elu_1 = async_compile.triton('triton_poi_fused_convolution_elu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 100352 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 784) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + (x3), tmp2, None) tl.store(out_ptr0 + (x3), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/fz/cfzg3qdwec5lstpepdjdudu5tdtu2fyvlwlz5bpalyqxa7muzgwa.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_2 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_2 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 25088 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x1 = (xindex // 14) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (56*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (56*x1)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (28 + (2*x0) + (56*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + (2*x0) + (56*x1)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, 
tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, xmask) tl.store(out_ptr1 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/4a/c4adbs2zvcyhpyarft7tzdybxfw5cze3dac4gasti4di6pcpppuz.py # Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # x_3 => expm1_2, gt_2, mul_6, mul_8, where_2 # Graph fragment: # %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {}) # %mul_6 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 1.0), kwargs = {}) # %expm1_2 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_6,), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_2, 1.0), kwargs = {}) # %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %mul_6, %mul_8), kwargs = {}) triton_poi_fused_convolution_elu_3 = async_compile.triton('triton_poi_fused_convolution_elu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 36864 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 144) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 
+ (x3), tmp2, None) tl.store(out_ptr0 + (x3), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/e5/ce5fcyhq5omwppt7pa4tcux5mt66igvmzxuh5h7bzff7bs5ueaj2.py # Topologically Sorted Source Nodes: [conv2d_3, x_4], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # x_4 => expm1_3, gt_3, mul_11, mul_9, where_3 # Graph fragment: # %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {}) # %mul_9 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 1.0), kwargs = {}) # %expm1_3 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_9,), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_3, 1.0), kwargs = {}) # %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %mul_9, %mul_11), kwargs = {}) triton_poi_fused_convolution_elu_4 = async_compile.triton('triton_poi_fused_convolution_elu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 100) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ye/cyewf77lsr5xfweqavruswxzhu3mdxwitxnuanqdhbuci32jubws.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices] # Source 
node to ATen node mapping: # x_5 => _low_memory_max_pool2d_with_offsets_1, getitem_3 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%where_3, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x3 = (xindex // 5) x2 = (xindex // 1600) x4 = xindex % 1600 x5 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (20*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (20*x3)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (10 + (2*x0) + (20*x3)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + (2*x0) + (20*x3)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x4 + (1664*x2)), tmp15, xmask) tl.store(out_ptr1 + (x5), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/4e/c4e6fl7aylmsjnu2tb2lgahthzjhg4vmyqnneoc6rsbcjcaxzeoy.py # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.elu] # Source node to ATen node mapping: # x_7 => expm1_4, gt_4, mul_12, mul_14, where_4 # Graph fragment: # %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%addmm, 
0), kwargs = {}) # %mul_12 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm, 1.0), kwargs = {}) # %expm1_4 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_12,), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_4, 1.0), kwargs = {}) # %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %mul_12, %mul_14), kwargs = {}) triton_poi_fused_elu_6 = async_compile.triton('triton_poi_fused_elu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_elu_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (x0), tmp7, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args args.clear() assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64, ), (1, )) assert_size_stride(primals_10, (512, 1600), (1600, 1)) assert_size_stride(primals_11, (512, ), (1, )) assert_size_stride(primals_12, (10, 512), (512, 1)) assert_size_stride(primals_13, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 30, 30), (28800, 900, 30, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 32, 30, 30), (28800, 900, 30, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.elu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_elu_0.run(buf1, primals_2, buf2, 115200, grid=grid(115200), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 32, 28, 28), (25088, 784, 28, 1)) buf4 = buf3; del buf3 # reuse buf5 = empty_strided_cuda((4, 32, 28, 28), (25088, 784, 28, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.elu] triton_poi_fused_convolution_elu_1.run(buf4, primals_5, buf5, 100352, grid=grid(100352), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 32, 14, 14), (6272, 196, 14, 1), torch.float32) buf7 = empty_strided_cuda((4, 32, 14, 14), (6272, 196, 14, 1), torch.int8) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_2.run(buf5, buf6, buf7, 25088, grid=grid(25088), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 12, 12), (9216, 144, 12, 1)) buf9 = buf8; del buf8 # reuse buf10 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten.elu] triton_poi_fused_convolution_elu_3.run(buf9, primals_7, buf10, 36864, grid=grid(36864), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf11 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 64, 10, 10), (6400, 100, 10, 1)) buf12 = buf11; del buf11 # reuse buf13 = empty_strided_cuda((4, 64, 10, 10), (6400, 100, 10, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_3, x_4], Original ATen: [aten.convolution, aten.elu] triton_poi_fused_convolution_elu_4.run(buf12, primals_9, buf13, 25600, grid=grid(25600), stream=stream0) del primals_9 buf14 = empty_strided_cuda((4, 64, 5, 5), (1664, 25, 5, 1), torch.int8) buf15 = empty_strided_cuda((4, 64, 5, 5), (1600, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_5.run(buf13, buf14, buf15, 6400, grid=grid(6400), stream=stream0) buf16 = empty_strided_cuda((4, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, reinterpret_tensor(buf15, (4, 1600), (1600, 1), 0), reinterpret_tensor(primals_10, (1600, 512), (1, 1600), 0), alpha=1, beta=1, out=buf16) del primals_11 buf17 = empty_strided_cuda((4, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.elu] 
triton_poi_fused_elu_6.run(buf16, buf17, 2048, grid=grid(2048), stream=stream0) buf18 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.addmm] extern_kernels.addmm(primals_13, buf17, reinterpret_tensor(primals_12, (512, 10), (1, 512), 0), alpha=1, beta=1, out=buf18) del primals_13 return (buf18, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf2, buf4, buf5, buf6, buf7, buf9, buf10, buf12, buf13, buf14, reinterpret_tensor(buf15, (4, 1600), (1600, 1), 0), buf16, buf17, primals_12, primals_10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 32, 32), (3072, 1024, 32, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((512, 1600), (1600, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((10, 512), (512, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
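# Every triton_poi_fused_convolution_elu_* kernel above repeats one pattern:
# add the per-channel conv bias, then apply ELU with alpha = 1.0 via expm1.
# A minimal eager-mode reference for that fused pattern (function and argument
# names here are illustrative, not part of the generated code):

import torch

def fused_bias_elu_reference(conv_out, bias):
    """Matches tmp2 = tmp0 + tmp1; tl.where(tmp2 > 0, tmp2, expm1(tmp2))."""
    x = conv_out + bias.view(1, -1, 1, 1)
    return torch.where(x > 0, x, torch.expm1(x))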
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net1(nn.Module):

    def __init__(self):
        super(Net1, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3)
        self.conv2 = nn.Conv2d(32, 32, 3)
        self.conv3 = nn.Conv2d(32, 64, 3)
        self.conv4 = nn.Conv2d(64, 64, 3)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(64 * 5 * 5, 512)
        self.fc2 = nn.Linear(512, 10)

    def forward(self, x):
        x = F.elu(self.conv1(x))
        x = F.elu(self.conv2(x))
        x = self.pool1(x)
        x = F.elu(self.conv3(x))
        x = F.elu(self.conv4(x))
        x = self.pool2(x)
        x = x.view(-1, 64 * 5 * 5)
        x = F.elu(self.fc1(x))
        x = self.fc2(x)
        return x

    def linear_layer_ids(self):
        return [8, 10]

    def linear_layer_parameters(self):
        # Flatten and concatenate the parameters of both linear layers.
        linear1 = torch.cat([x.view(-1) for x in list(self.fc1.parameters()
            ) + list(self.fc2.parameters())])
        return linear1

    def train_order_block_ids(self):
        return [[4, 5], [10, 11], [2, 3], [6, 7], [0, 1], [8, 9]]


def get_inputs():
    return [torch.rand([4, 3, 32, 32])]


def get_init_inputs():
    return [[], {}]
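# Shape bookkeeping behind nn.Linear(64 * 5 * 5, 512), assuming the 32x32
# inputs from get_inputs(): each valid 3x3 conv shrinks H and W by 2, and each
# 2x2/stride-2 pool floor-halves them, matching the assert_size_stride shapes
# in call() above:

h = 32
h -= 2   # conv1: 32 -> 30
h -= 2   # conv2: 30 -> 28
h //= 2  # pool1: 28 -> 14
h -= 2   # conv3: 14 -> 12
h -= 2   # conv4: 12 -> 10
h //= 2  # pool2: 10 -> 5
assert h == 5 and 64 * h * h == 1600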
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 115200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 900 % 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 784 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp9, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 25088 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x1 = xindex // 14 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x1), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x1), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_convolution_elu_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 144 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp9, None) @triton.jit def 
triton_poi_fused_convolution_elu_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 100 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x3 = xindex // 5 x2 = xindex // 1600 x4 = xindex % 1600 x5 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x3), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x3), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x4 + 1664 * x2), tmp15, xmask) tl.store(out_ptr1 + x5, tmp16, xmask) @triton.jit def triton_poi_fused_elu_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (512, 1600), (1600, 1)) assert_size_stride(primals_11, (512,), (1,)) assert_size_stride(primals_12, (10, 512), (512, 1)) assert_size_stride(primals_13, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 30, 30), (28800, 900, 30, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 32, 30, 30), (28800, 900, 30, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_elu_0[grid(115200)](buf1, primals_2, buf2, 115200, 
XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 32, 28, 28), (25088, 784, 28, 1)) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 32, 28, 28), (25088, 784, 28, 1), torch.float32) triton_poi_fused_convolution_elu_1[grid(100352)](buf4, primals_5, buf5, 100352, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 32, 14, 14), (6272, 196, 14, 1), torch.float32) buf7 = empty_strided_cuda((4, 32, 14, 14), (6272, 196, 14, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_2[grid(25088)](buf5, buf6, buf7, 25088, XBLOCK=128, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 12, 12), (9216, 144, 12, 1)) buf9 = buf8 del buf8 buf10 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1), torch.float32) triton_poi_fused_convolution_elu_3[grid(36864)](buf9, primals_7, buf10, 36864, XBLOCK=512, num_warps=4, num_stages=1) del primals_7 buf11 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 64, 10, 10), (6400, 100, 10, 1)) buf12 = buf11 del buf11 buf13 = empty_strided_cuda((4, 64, 10, 10), (6400, 100, 10, 1), torch.float32) triton_poi_fused_convolution_elu_4[grid(25600)](buf12, primals_9, buf13, 25600, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf14 = empty_strided_cuda((4, 64, 5, 5), (1664, 25, 5, 1), torch.int8) buf15 = empty_strided_cuda((4, 64, 5, 5), (1600, 25, 5, 1), torch. 
            float32)
        triton_poi_fused_max_pool2d_with_indices_5[grid(6400)](buf13,
            buf14, buf15, 6400, XBLOCK=256, num_warps=4, num_stages=1)
        buf16 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
        extern_kernels.addmm(primals_11, reinterpret_tensor(buf15, (4,
            1600), (1600, 1), 0), reinterpret_tensor(primals_10, (1600,
            512), (1, 1600), 0), alpha=1, beta=1, out=buf16)
        del primals_11
        buf17 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
        triton_poi_fused_elu_6[grid(2048)](buf16, buf17, 2048, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf18 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        extern_kernels.addmm(primals_13, buf17, reinterpret_tensor(
            primals_12, (512, 10), (1, 512), 0), alpha=1, beta=1, out=buf18)
        del primals_13
    return (buf18, primals_1, primals_3, primals_4, primals_6, primals_8,
        buf1, buf2, buf4, buf5, buf6, buf7, buf9, buf10, buf12, buf13,
        buf14, reinterpret_tensor(buf15, (4, 1600), (1600, 1), 0), buf16,
        buf17, primals_12, primals_10)


class Net1New(nn.Module):

    def __init__(self):
        super(Net1New, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3)
        self.conv2 = nn.Conv2d(32, 32, 3)
        self.conv3 = nn.Conv2d(32, 64, 3)
        self.conv4 = nn.Conv2d(64, 64, 3)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(64 * 5 * 5, 512)
        self.fc2 = nn.Linear(512, 10)

    def linear_layer_ids(self):
        return [8, 10]

    def linear_layer_parameters(self):
        # Flatten and concatenate the parameters of both linear layers.
        linear1 = torch.cat([x.view(-1) for x in list(self.fc1.parameters()
            ) + list(self.fc2.parameters())])
        return linear1

    def train_order_block_ids(self):
        return [[4, 5], [10, 11], [2, 3], [6, 7], [0, 1], [8, 9]]

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.conv3.weight
        primals_7 = self.conv3.bias
        primals_8 = self.conv4.weight
        primals_9 = self.conv4.bias
        primals_10 = self.fc1.weight
        primals_11 = self.fc1.bias
        primals_12 = self.fc2.weight
        primals_13 = self.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13])
        return output[0]
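# A hypothetical smoke test for the compiled wrapper (it requires a CUDA
# device, since call() pins all buffers to cuda:0); the input shape follows
# get_inputs() above:

import torch

if torch.cuda.is_available():
    model = Net1New().cuda()
    logits = model(torch.rand(4, 3, 32, 32, device='cuda'))
    assert logits.shape == (4, 10)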
SarodYatawatta/federated-pytorch-test
Net1
false
8,760
[ "Apache-2.0" ]
33
42a51ba12a92b32fa19273340d5b61e74e11d8e0
https://github.com/SarodYatawatta/federated-pytorch-test/tree/42a51ba12a92b32fa19273340d5b61e74e11d8e0
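# A hypothetical parity check between the eager and compiled variants of this
# record: Net1 and Net1New declare identical submodules, so their state_dicts
# are interchangeable (tolerances are a guess; fused kernels may reassociate
# floating-point ops):

import torch

if torch.cuda.is_available():
    eager, compiled = Net1().cuda().eval(), Net1New().cuda().eval()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 3, 32, 32, device='cuda')
    torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-4)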
FeatureMapPairEncoderV2
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/q5/cq5aipuk6deqwqvu7wwfhdeucuxv5t5nvltpgshc4ispaexr74sm.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 24576 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 96 y1 = (yindex // 96) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (96*x2) + (864*y1)), tmp0, xmask) ''', device_str='cuda') # 
kernel path: runs/run_shard_1/inductor_cache/hd/chdam3fgly5fytb3wblunaixxdjf6qyovsld6nxrvlrnidxhtqtk.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 131072 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/xl/cxlxpy5fmo5u4nwndqxqsjjwwb7gemphwqhskf4evwtbnq5zd6lc.py # Topologically Sorted Source Nodes: [imgs], Original ATen: [aten.cat] # Source node to ATen node mapping: # imgs => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2, %sub], 1), kwargs = {}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1572864 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 96 x1 = (xindex // 96) % 4096 x2 = (xindex // 393216) x3 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x1 + (4096*x0) + (131072*x2)), tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 64, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x1 + (4096*((-32) + x0)) + (131072*x2)), tmp9, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 96, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr0 + (x1 + (4096*((-64) + x0)) + (131072*x2)), tmp11, eviction_policy='evict_last', other=0.0) tmp15 = tl.load(in_ptr1 + (x1 + (4096*((-64) + x0)) + (131072*x2)), tmp11, eviction_policy='evict_last', other=0.0) tmp16 = tmp14 - tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp11, tmp16, tmp17) tmp19 = tl.where(tmp9, tmp10, tmp18) tmp20 = tl.where(tmp4, tmp5, tmp19) tl.store(out_ptr0 + (x3), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/bl/cblqfqmoslgwutocsvudvhc2kivn5ssdiocdc3vs4l5vndguaykd.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 
'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3936256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/xm/cxmlqc5z655ft3mgd5tam3nilq7ks6kppvpkostak4hbgn3vg3gt.py # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_5, %primals_6, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 4096], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 
2048 xnumel = 3600 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = (yindex // 512) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (512*x2) + (1843200*y1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + (3600*y3)), tmp4, xmask) tl.store(out_ptr1 + (y0 + (512*x2) + (1843200*y1)), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 32, 64, 64), (131072, 4096, 64, 1)) assert_size_stride(primals_2, (4, 32, 64, 64), (131072, 4096, 64, 1)) assert_size_stride(primals_3, (256, 96, 3, 3), (864, 9, 3, 1)) assert_size_stride(primals_4, (256, ), (1, )) assert_size_stride(primals_5, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_6, (512, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((256, 96, 3, 3), (864, 1, 288, 96), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_3, buf0, 24576, 9, grid=grid(24576, 9), stream=stream0) del primals_3 buf1 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_5, buf1, 131072, 9, grid=grid(131072, 9), stream=stream0) del primals_5 buf2 = empty_strided_cuda((4, 96, 64, 64), (393216, 1, 6144, 96), torch.float32) # Topologically Sorted Source Nodes: [imgs], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(primals_1, primals_2, buf2, 1572864, grid=grid(1572864), stream=stream0) del primals_1 del primals_2 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 256, 62, 62), (984064, 1, 15872, 256)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_3.run(buf4, primals_4, 3936256, grid=grid(3936256), stream=stream0) del primals_4 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 512, 60, 60), (1843200, 1, 30720, 512)) buf6 = empty_strided_cuda((4, 512, 60, 60), (1843200, 3600, 60, 1), torch.float32) buf7 = empty_strided_cuda((4, 512, 60, 60), (1843200, 1, 30720, 512), torch.bool) # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_4.run(buf5, primals_6, buf6, buf7, 2048, 3600, grid=grid(2048, 3600), stream=stream0) del buf5 del primals_6 return (reinterpret_tensor(buf6, (4, 1843200), (1843200, 1), 0), buf0, buf1, buf2, buf4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from 
torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 32, 64, 64), (131072, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 32, 64, 64), (131072, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((256, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
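# Hedged sketch (not part of the generated dump; assumes a CUDA device):
# triton_poi_fused_cat_2 above materializes torch.cat([src, dst, src - dst],
# dim=1) directly into a channels-last-strided buffer, while the layout
# kernels only repack the conv weights to match. An eager reference:
import torch

def eager_cat_reference(src: torch.Tensor, dst: torch.Tensor) -> torch.Tensor:
    # Same values the fused kernel writes, before any layout permutation.
    return torch.cat([src, dst, src - dst], dim=1)

src = torch.rand(4, 32, 64, 64, device='cuda')
dst = torch.rand(4, 32, 64, 64, device='cuda')
ref = eager_cat_reference(src, dst)
# The kernel's output strides (393216, 1, 6144, 96) are exactly the
# channels-last strides of a (4, 96, 64, 64) tensor:
assert ref.contiguous(memory_format=torch.channels_last).stride() == (393216, 1, 6144, 96)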
import torch from torch import nn import torch.nn.functional as F class FeatureMapPairEncoderV2(nn.Module): def __init__(self, init_scale=1.0, no_weight_init=False): super(FeatureMapPairEncoderV2, self).__init__() self.conv1 = nn.Conv2d(96, 256, kernel_size=3, stride=1) self.conv2 = nn.Conv2d(256, 512, kernel_size=3, stride=1) if not no_weight_init: for layer in (self.conv1, self.conv2): nn.init.orthogonal_(layer.weight, init_scale) def forward(self, src_imgs, dst_imgs): imgs = torch.cat([src_imgs, dst_imgs, src_imgs - dst_imgs], dim=1) x = F.relu(self.conv1(imgs)) x = F.relu(self.conv2(x)) return x.view(x.size(0), -1) def get_inputs(): return [torch.rand([4, 32, 64, 64]), torch.rand([4, 32, 64, 64])] def get_init_inputs(): return [[], {}]
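# Hedged sketch (assumes a CUDA device; tolerances are illustrative): the
# compiled call() from the dump above should reproduce this eager module.
import torch

model = FeatureMapPairEncoderV2(no_weight_init=True).cuda()
src = torch.rand(4, 32, 64, 64, device='cuda')
dst = torch.rand(4, 32, 64, 64, device='cuda')
ref = model(src, dst)
out = call([src, dst,
            model.conv1.weight, model.conv1.bias,
            model.conv2.weight, model.conv2.bias])[0]
torch.testing.assert_close(out, ref, rtol=1e-4, atol=1e-4)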
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 96 y1 = yindex // 96 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 96 * x2 + 864 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 96 x1 = xindex // 96 % 4096 x2 = xindex // 393216 x3 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x1 + 4096 * x0 + 131072 * x2), tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 64, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x1 + 4096 * (-32 + x0) + 131072 * x2), tmp9, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 96, tl.int64) tmp14 = tl.load(in_ptr0 + (x1 + 4096 * (-64 + x0) + 131072 * x2), tmp11, eviction_policy='evict_last', other=0.0) tmp15 = tl.load(in_ptr1 + (x1 + 4096 * (-64 + x0) + 131072 * x2), tmp11, eviction_policy='evict_last', other=0.0) tmp16 = tmp14 - tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp11, tmp16, tmp17) tmp19 = tl.where(tmp9, tmp10, tmp18) tmp20 = tl.where(tmp4, tmp5, tmp19) tl.store(out_ptr0 + x3, tmp20, None) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): xnumel = 3600 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = yindex // 512 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 1843200 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 3600 * y3), tmp4, xmask) tl.store(out_ptr1 + (y0 + 512 * x2 + 1843200 * y1), tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 32, 64, 64), (131072, 4096, 64, 1)) assert_size_stride(primals_2, (4, 32, 64, 64), (131072, 4096, 64, 1)) assert_size_stride(primals_3, (256, 96, 3, 3), (864, 9, 3, 1)) assert_size_stride(primals_4, (256,), (1,)) assert_size_stride(primals_5, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_6, (512,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((256, 96, 3, 3), (864, 1, 288, 96), torch .float32) get_raw_stream(0) triton_poi_fused_0[grid(24576, 9)](primals_3, buf0, 24576, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_3 buf1 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_1[grid(131072, 9)](primals_5, buf1, 131072, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_5 buf2 = empty_strided_cuda((4, 96, 64, 64), (393216, 1, 6144, 96), torch.float32) triton_poi_fused_cat_2[grid(1572864)](primals_1, primals_2, buf2, 1572864, XBLOCK=1024, num_warps=4, num_stages=1) del primals_1 del primals_2 buf3 = extern_kernels.convolution(buf2, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 256, 62, 62), (984064, 1, 15872, 256)) buf4 = buf3 del buf3 triton_poi_fused_convolution_relu_3[grid(3936256)](buf4, primals_4, 3936256, XBLOCK=1024, num_warps=4, num_stages=1) del primals_4 buf5 = extern_kernels.convolution(buf4, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 512, 60, 60), (1843200, 1, 30720, 512)) buf6 = empty_strided_cuda((4, 512, 60, 60), (1843200, 3600, 60, 1), torch.float32) buf7 = empty_strided_cuda((4, 512, 60, 60), (1843200, 1, 30720, 512 ), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_4[grid(2048, 3600) ](buf5, primals_6, buf6, buf7, 2048, 3600, XBLOCK=64, YBLOCK=64, num_warps=8, num_stages=1) del buf5 del primals_6 return reinterpret_tensor(buf6, (4, 1843200), (1843200, 1), 0 ), buf0, buf1, buf2, buf4, buf7 class FeatureMapPairEncoderV2New(nn.Module): def __init__(self, init_scale=1.0, no_weight_init=False): super(FeatureMapPairEncoderV2New, self).__init__() self.conv1 = nn.Conv2d(96, 256, kernel_size=3, stride=1) self.conv2 = nn.Conv2d(256, 512, kernel_size=3, stride=1) if not no_weight_init: for layer in (self.conv1, self.conv2): nn.init.orthogonal_(layer.weight, init_scale) def forward(self, input_0, input_1): primals_3 = self.conv1.weight primals_4 = self.conv1.bias primals_5 = self.conv2.weight primals_6 = self.conv2.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, 
primals_3, primals_4, primals_5, primals_6]) return output[0]
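# Hedged usage sketch (assumes a CUDA device): the wrapper keeps the eager
# module's interface; each sample flattens to 512 * 60 * 60 = 1843200
# features, since two valid 3x3 convolutions shrink 64x64 to 60x60.
import torch

enc = FeatureMapPairEncoderV2New().cuda()
feats = enc(torch.rand(4, 32, 64, 64, device='cuda'),
            torch.rand(4, 32, 64, 64, device='cuda'))
assert feats.shape == (4, 1843200)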
KH-Kyle/rmp_nav
FeatureMapPairEncoderV2
false
8761
[ "MIT" ]
30
d598fe70664a4cdc0e9b9dd4b52e84aa3de1b551
https://github.com/KH-Kyle/rmp_nav/tree/d598fe70664a4cdc0e9b9dd4b52e84aa3de1b551
G_Small
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/pl/cpl6jhqnafnew6pronlksao6s33bcslpzm3k66a74phcycboxpwr.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4096 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (1024*y1)), tmp0, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/eb/cebfmp3xsydvhof2vuiuzrtwr7fwapeufpm52glmntqds2lsygkv.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4096 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/q4/cq46xznhsxlsfekongioaargva4mzxnrgtlpbrrnhugzm5zrigaf.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 
'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (1024*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/j3/cj3fmdhhayqju7oi6cghha6ztxe3k6a4nkplf3ytoh6ngdwwzu7z.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x => convolution # x_1 => gt, mul, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %where : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) triton_poi_fused_convolution_leaky_relu_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 1024], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, 
in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 256 xnumel = 1024 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (1024*y3)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (y0 + (64*x2) + (65536*y1)), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + (64*x2) + (65536*y1)), tmp7, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/iz/ciztmvwhv6emnancfpy7cbrz6fbor5fknq7e6dfwrlphuy4zy66p.py # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x_2 => convolution_1 # x_3 => gt_1, mul_1, where_1 # Graph fragment: # %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {}) # %where_1 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {}) triton_poi_fused_convolution_leaky_relu_4 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), 
None) tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, None) tl.store(out_ptr1 + (x2), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/b5/cb5iajevrdo3fbl5mipzyzddlpduxqhwtlbdflw2gup5i3z247kz.py # Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x_4 => convolution_2 # x_5 => gt_2, mul_2, where_2 # Graph fragment: # %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_6, %primals_7, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.2), kwargs = {}) # %where_2 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {}) triton_poi_fused_convolution_leaky_relu_5 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, None) tl.store(out_ptr1 + (x2), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/dr/cdr7wjqsdk2p5ntjftsxepatlnakzdou7akohpoxs4254rwwy7uh.py # Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x_6 => 
convolution_3 # x_7 => gt_3, mul_3, where_3 # Graph fragment: # %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_8, %primals_9, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_3 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.2), kwargs = {}) # %where_3 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {}) triton_poi_fused_convolution_leaky_relu_6 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, None) tl.store(out_ptr1 + (x2), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ws/cwsloz2nozeca4pbc5td32dg2nwfiwwaawcofnu4kllep4dyfibq.py # Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x_8 => convolution_4 # x_9 => gt_4, mul_4, where_4 # Graph fragment: # %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_3, %primals_10, %primals_11, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_4 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.2), kwargs = {}) # %where_4 : [num_users=3] = 
call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_4, %mul_4), kwargs = {}) triton_poi_fused_convolution_leaky_relu_7 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/by/cbyuoo67qj25di4wzzzqcnljridqmsdvfw6yv42kpcdkt3j5tijh.py # Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # x_10 => convolution_5 # x_11 => gt_5, mul_5, where_5 # Graph fragment: # %convolution_5 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_4, %primals_12, %primals_13, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_5 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_5, 0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_5, 0.2), kwargs = {}) # %where_5 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %convolution_5, %mul_5), kwargs = {}) triton_poi_fused_convolution_leaky_relu_8 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ky/cky2wewewh75fidvmgc75rn34mqutgqo72olv7c7ywm6safu7neu.py # Topologically Sorted Source Nodes: [d], Original ATen: [aten.cat] # Source node to ATen node mapping: # d => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %where_5], 1), kwargs = {}) triton_poi_fused_cat_9 = async_compile.triton('triton_poi_fused_cat_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 128 x1 = (xindex // 128) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((64*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 128, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tl.load(in_ptr2 + ((64*x1) + ((-64) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/mr/cmrxn2w2kzg4xucbro2b2yyj7kcomwo6vowwdmcnzrtztn63ir4r.py # Topologically Sorted Source Nodes: [d_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # d_1 => cat_1 # Graph fragment: # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_1, %where_4], 1), kwargs = {}) triton_poi_fused_cat_10 = async_compile.triton('triton_poi_fused_cat_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = (xindex // 128) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((64*x1) + x0), tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0), tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = 
tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 128, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tl.load(in_ptr2 + ((64*x1) + ((-64) + x0)), tmp12, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ak/cakh7qvd4rmaebq6nb366mnbyaq4mnjen5rug3lqlxfnckkpranh.py # Topologically Sorted Source Nodes: [d_2], Original ATen: [aten.cat] # Source node to ATen node mapping: # d_2 => cat_2 # Graph fragment: # %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_2, %where_3], 1), kwargs = {}) triton_poi_fused_cat_11 = async_compile.triton('triton_poi_fused_cat_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = (xindex // 128) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((64*x1) + x0), tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0), tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 128, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tl.load(in_ptr2 + ((64*x1) + ((-64) + x0)), tmp12, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/wj/cwj6yqdhzukngdx76z3e47h7tsxa5fhdk5no73qs5teskgq7wfec.py # Topologically Sorted Source Nodes: [d_3], Original ATen: [aten.cat] # Source node to ATen node mapping: # d_3 => cat_3 # Graph fragment: # %cat_3 : [num_users=2] = 
call_function[target=torch.ops.aten.cat.default](args = ([%relu_3, %where_2], 1), kwargs = {}) triton_poi_fused_cat_12 = async_compile.triton('triton_poi_fused_cat_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = (xindex // 128) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((64*x1) + x0), tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0), tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 128, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tl.load(in_ptr2 + ((64*x1) + ((-64) + x0)), tmp12, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ol/colnjxajhneas5qty6eu7cprkaqwiwz3s7bfjwhr3cxvpqoglxj7.py # Topologically Sorted Source Nodes: [d_4], Original ATen: [aten.cat] # Source node to ATen node mapping: # d_4 => cat_4 # Graph fragment: # %cat_4 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_4, %where_1], 1), kwargs = {}) triton_poi_fused_cat_13 = async_compile.triton('triton_poi_fused_cat_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 
'*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = (xindex // 128) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((64*x1) + x0), tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0), tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 128, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tl.load(in_ptr2 + ((64*x1) + ((-64) + x0)), tmp12, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/o4/co42hdvcxdbdwalpuyppjhd2yhgle4abuqguymvzibhnbk6fsjnt.py # Topologically Sorted Source Nodes: [d_5], Original ATen: [aten.cat] # Source node to ATen node mapping: # d_5 => cat_5 # Graph fragment: # %cat_5 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_5, %where], 1), kwargs = {}) triton_poi_fused_cat_14 = async_compile.triton('triton_poi_fused_cat_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = (xindex // 128) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((64*x1) + x0), tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0), tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 128, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tl.load(in_ptr2 + ((64*x1) + ((-64) + x0)), tmp12, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/kn/ckndmhylafj2xjjbojaldf3kvsx6yrfcydzry3bpi4vgys5rzocj.py # Topologically Sorted Source Nodes: [x_29, x_30], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_29 => convolution_13 # x_30 => relu_6 # Graph fragment: # %convolution_13 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_5, %primals_28, %primals_29, [2, 2], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {}) # %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_13,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_6, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_15 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_15(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = 0.0 tmp7 = tmp5 <= tmp6 tl.store(in_out_ptr0 + (x0), tmp5, None) tl.store(out_ptr0 + (x0), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/qz/cqzs6uatjxsyi5rzxclwzggmrfa73yocsguoqutckcw57afpanm7.py # Topologically Sorted Source Nodes: [x_27, x_28], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_27 => convolution_12 # x_28 => relu_5 # Graph fragment: # %convolution_12 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_4, %primals_26, %primals_27, [2, 2], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {}) # %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_12,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_5, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_16 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_16', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2), tmp6, None) ''', 
device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/mz/cmzim3gw2243r7f2ygq4swceufpovbbk4ueefpzrzrlu5urpsm6g.py # Topologically Sorted Source Nodes: [x_25, x_26], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_25 => convolution_11 # x_26 => relu_4 # Graph fragment: # %convolution_11 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_3, %primals_24, %primals_25, [2, 2], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_11,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_17 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_17', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_17(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/rd/crdw4ivtgbr3sclm6qgr5d6vbt5wrxvpjncbynksybiipsuuk3jl.py # Topologically Sorted Source Nodes: [x_23, x_24], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_23 => convolution_10 # x_24 => relu_3 # Graph fragment: # %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_22, %primals_23, [2, 2], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_10,), kwargs = {}) # %le_3 : 
[num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_18 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_18', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_18(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/xx/cxxulyqvngs25yt4sgkzdlp7qeq66rdujb277tfr26ok46jatda2.py # Topologically Sorted Source Nodes: [x_20, x_22], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_20 => convolution_9 # x_22 => relu_2 # Graph fragment: # %convolution_9 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_20, %primals_21, [2, 2], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_9,), kwargs = {}) # %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_19 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_19', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_19(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/f6/cf6spwua6bfx4qoztvryiqg7w36exesl5wohnwhh6kqcd2vh63ci.py # Topologically Sorted Source Nodes: [x_17, x_19], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_17 => convolution_8 # x_19 => relu_1 # Graph fragment: # %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_18, %primals_19, [2, 2], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_8,), kwargs = {}) # %le_5 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_20 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_20', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_20(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/a3/ca3gocijxcch6odqpvh3znou4g5x6uzflpnr5fewwrooqeusqqkb.py # Topologically Sorted Source Nodes: [x_14, x_16], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_14 => convolution_7 # x_16 => relu # Graph fragment: # %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_6, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {}) # %le_6 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_21 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_21', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_21(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), 
xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29 = args args.clear() assert_size_stride(primals_1, (64, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (64, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (64, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_9, (64, ), (1, )) assert_size_stride(primals_10, (64, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_11, (64, ), (1, )) assert_size_stride(primals_12, (64, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_13, (64, ), (1, )) assert_size_stride(primals_14, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_15, (64, ), (1, )) assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_17, (64, ), (1, )) assert_size_stride(primals_18, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_19, (64, ), (1, )) assert_size_stride(primals_20, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_21, (64, ), (1, )) assert_size_stride(primals_22, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_23, (64, ), (1, )) assert_size_stride(primals_24, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_25, (64, ), (1, )) assert_size_stride(primals_26, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_27, (64, ), (1, )) assert_size_stride(primals_28, (128, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_29, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_4, buf0, 4096, 16, grid=grid(4096, 16), stream=stream0) del primals_4 buf1 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(primals_6, buf1, 4096, 16, grid=grid(4096, 16), stream=stream0) del primals_6 buf2 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(primals_8, buf2, 4096, 16, grid=grid(4096, 16), stream=stream0) del primals_8 buf3 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(primals_10, buf3, 4096, 16, grid=grid(4096, 16), stream=stream0) del primals_10 buf4 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(primals_12, buf4, 4096, 16, grid=grid(4096, 16), stream=stream0) del primals_12 buf5 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32) # 
Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_14, buf5, 4096, 9, grid=grid(4096, 9), stream=stream0) del primals_14 buf6 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_16, buf6, 4096, 9, grid=grid(4096, 9), stream=stream0) del primals_16 buf7 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_18, buf7, 8192, 16, grid=grid(8192, 16), stream=stream0) del primals_18 buf8 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_20, buf8, 8192, 16, grid=grid(8192, 16), stream=stream0) del primals_20 buf9 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_22, buf9, 8192, 16, grid=grid(8192, 16), stream=stream0) del primals_22 buf10 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_24, buf10, 8192, 16, grid=grid(8192, 16), stream=stream0) del primals_24 buf11 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_26, buf11, 8192, 16, grid=grid(8192, 16), stream=stream0) del primals_26 # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf13 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.bool) buf14 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_3.run(buf12, primals_2, buf13, buf14, 256, 1024, grid=grid(256, 1024), stream=stream0) del buf12 del primals_2 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf15 = extern_kernels.convolution(buf14, buf0, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 64, 16, 16), (16384, 1, 1024, 64)) buf16 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.bool) buf17 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.float32) # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_4.run(buf15, primals_5, buf16, buf17, 65536, grid=grid(65536), stream=stream0) del buf15 del primals_5 # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] buf18 = extern_kernels.convolution(buf17, buf1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 64, 8, 8), (4096, 1, 512, 64)) buf19 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch.bool) buf20 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch.float32) # Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_5.run(buf18, primals_7, buf19, 
buf20, 16384, grid=grid(16384), stream=stream0) del buf18 del primals_7 # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] buf21 = extern_kernels.convolution(buf20, buf2, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 64, 4, 4), (1024, 1, 256, 64)) buf22 = empty_strided_cuda((4, 64, 4, 4), (1024, 1, 256, 64), torch.bool) buf23 = empty_strided_cuda((4, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_6.run(buf21, primals_9, buf22, buf23, 4096, grid=grid(4096), stream=stream0) del buf21 del primals_9 # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf23, buf3, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 64, 2, 2), (256, 1, 128, 64)) buf25 = empty_strided_cuda((4, 64, 2, 2), (256, 1, 128, 64), torch.bool) buf26 = empty_strided_cuda((4, 64, 2, 2), (256, 1, 128, 64), torch.float32) # Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_7.run(buf24, primals_11, buf25, buf26, 1024, grid=grid(1024), stream=stream0) del buf24 del primals_11 # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution] buf27 = extern_kernels.convolution(buf26, buf4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 64, 1, 1), (64, 1, 64, 64)) buf28 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 64, 64), torch.bool) buf29 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 64, 64), torch.float32) # Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_8.run(buf27, primals_13, buf28, buf29, 256, grid=grid(256), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution] buf30 = extern_kernels.convolution(buf29, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 64, 1, 1), (64, 1, 64, 64)) buf31 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 64, 64), torch.bool) buf32 = buf27; del buf27 # reuse # Topologically Sorted Source Nodes: [x_12, x_13], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_8.run(buf30, primals_15, buf31, buf32, 256, grid=grid(256), stream=stream0) del buf30 del primals_15 # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution] buf33 = extern_kernels.convolution(buf32, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf33, (4, 64, 1, 1), (64, 1, 64, 64)) buf34 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [d], Original ATen: [aten.cat] triton_poi_fused_cat_9.run(buf33, primals_17, buf29, buf34, 512, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [x_17], Original ATen: [aten.convolution] buf35 = extern_kernels.convolution(buf34, buf7, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) 
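        # NOTE (added commentary): each decoder stage below repeats the pattern of this one:
        # a transposed convolution (extern kernel) doubles the spatial size, then a fused cat
        # kernel (triton_poi_fused_cat_*) applies bias + ReLU to the deconv output and
        # concatenates the matching encoder activation along the channel dimension, producing
        # the 128-channel skip-connection input consumed by the next stage.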
assert_size_stride(buf35, (4, 64, 2, 2), (256, 1, 128, 64)) buf36 = empty_strided_cuda((4, 128, 2, 2), (512, 1, 256, 128), torch.float32) # Topologically Sorted Source Nodes: [d_1], Original ATen: [aten.cat] triton_poi_fused_cat_10.run(buf35, primals_19, buf26, buf36, 2048, grid=grid(2048), stream=stream0) # Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.convolution] buf37 = extern_kernels.convolution(buf36, buf8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf37, (4, 64, 4, 4), (1024, 1, 256, 64)) buf38 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128), torch.float32) # Topologically Sorted Source Nodes: [d_2], Original ATen: [aten.cat] triton_poi_fused_cat_11.run(buf37, primals_21, buf23, buf38, 8192, grid=grid(8192), stream=stream0) # Topologically Sorted Source Nodes: [x_23], Original ATen: [aten.convolution] buf39 = extern_kernels.convolution(buf38, buf9, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf39, (4, 64, 8, 8), (4096, 1, 512, 64)) buf40 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128), torch.float32) # Topologically Sorted Source Nodes: [d_3], Original ATen: [aten.cat] triton_poi_fused_cat_12.run(buf39, primals_23, buf20, buf40, 32768, grid=grid(32768), stream=stream0) # Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.convolution] buf41 = extern_kernels.convolution(buf40, buf10, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf41, (4, 64, 16, 16), (16384, 1, 1024, 64)) buf42 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32) # Topologically Sorted Source Nodes: [d_4], Original ATen: [aten.cat] triton_poi_fused_cat_13.run(buf41, primals_25, buf17, buf42, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [x_27], Original ATen: [aten.convolution] buf43 = extern_kernels.convolution(buf42, buf11, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf43, (4, 64, 32, 32), (65536, 1, 2048, 64)) buf44 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128), torch.float32) # Topologically Sorted Source Nodes: [d_5], Original ATen: [aten.cat] triton_poi_fused_cat_14.run(buf43, primals_27, buf14, buf44, 524288, grid=grid(524288), stream=stream0) # Topologically Sorted Source Nodes: [x_29], Original ATen: [aten.convolution] buf45 = extern_kernels.convolution(buf44, primals_28, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf45, (4, 1, 64, 64), (4096, 1, 64, 1)) buf46 = reinterpret_tensor(buf45, (4, 1, 64, 64), (4096, 4096, 64, 1), 0); del buf45 # reuse buf47 = empty_strided_cuda((4, 1, 64, 64), (4096, 1, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [x_29, x_30], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_15.run(buf46, primals_29, buf47, 16384, grid=grid(16384), stream=stream0) del primals_29 buf48 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.bool) # Topologically Sorted Source Nodes: [x_27, x_28], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_16.run(buf43, primals_27, buf48, 262144, 
grid=grid(262144), stream=stream0) del buf43 del primals_27 buf49 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.bool) # Topologically Sorted Source Nodes: [x_25, x_26], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_17.run(buf41, primals_25, buf49, 65536, grid=grid(65536), stream=stream0) del buf41 del primals_25 buf50 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch.bool) # Topologically Sorted Source Nodes: [x_23, x_24], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_18.run(buf39, primals_23, buf50, 16384, grid=grid(16384), stream=stream0) del buf39 del primals_23 buf51 = empty_strided_cuda((4, 64, 4, 4), (1024, 1, 256, 64), torch.bool) # Topologically Sorted Source Nodes: [x_20, x_22], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_19.run(buf37, primals_21, buf51, 4096, grid=grid(4096), stream=stream0) del buf37 del primals_21 buf52 = empty_strided_cuda((4, 64, 2, 2), (256, 1, 128, 64), torch.bool) # Topologically Sorted Source Nodes: [x_17, x_19], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_20.run(buf35, primals_19, buf52, 1024, grid=grid(1024), stream=stream0) del buf35 del primals_19 buf53 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 64, 64), torch.bool) # Topologically Sorted Source Nodes: [x_14, x_16], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_21.run(buf33, primals_17, buf53, 256, grid=grid(256), stream=stream0) del buf33 del primals_17 return (buf46, primals_1, primals_3, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, primals_28, buf13, buf14, buf16, buf17, buf19, buf20, buf22, buf23, buf25, buf26, buf28, buf29, buf31, buf32, buf34, buf36, buf38, buf40, buf42, buf44, buf47, buf48, buf49, buf50, buf51, buf52, buf53, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((64, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((64, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((64, ), (1, ), 
device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((128, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
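# ---------------------------------------------------------------------------
# Illustrative sketch (added commentary, not part of the generated module):
# each fused cat kernel above (triton_poi_fused_cat_9 .. _14) computes, in
# eager PyTorch terms, torch.cat([relu(deconv_out + bias), encoder_skip],
# dim=1) in a single pass over channels-last memory, while the
# *_threshold_backward_* kernels apply bias + ReLU where needed and record
# the relu_out <= 0 masks that autograd uses. A minimal reference for the
# cat fusion, assuming NCHW float32 tensors with matching spatial sizes:
import torch


def _reference_fused_cat(deconv_out, bias, encoder_skip):
    # channels 0..C-1 of the output: per-channel bias add + ReLU of the
    # transposed-convolution result
    left = torch.relu(deconv_out + bias.view(1, -1, 1, 1))
    # remaining channels: the encoder skip connection, copied unchanged
    return torch.cat([left, encoder_skip], dim=1)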
import torch import torch.nn as nn class Conv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, bn =False, activation='leakyrelu', dropout=False): super(Conv2d, self).__init__() padding = int((kernel_size - 1) / 2) self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding) self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0, affine=True) if bn else None self.dropout = nn.Dropout(p=0.5) if dropout else None if activation == 'leakyrelu': self.activation = nn.LeakyReLU(negative_slope=0.2) elif activation == 'relu': self.activation = nn.ReLU() elif activation == 'tanh': self.activation = nn.Tanh() else: raise ValueError('Not a valid activation, received {}'.format( activation)) def forward(self, x): x = self.conv(x) if self.bn is not None: x = self.bn(x) if self.dropout is not None: x = self.dropout(x) x = self.activation(x) return x class Deconv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, bn =False, activation='leakyrelu', dropout=False): super(Deconv2d, self).__init__() padding = int((kernel_size - 1) / 2) self.conv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding=padding) self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0, affine=True) if bn else None self.dropout = nn.Dropout(p=0.5) if dropout else None if activation == 'leakyrelu': self.activation = nn.LeakyReLU(negative_slope=0.2) elif activation == 'relu': self.activation = nn.ReLU() elif activation == 'tanh': self.activation = nn.Tanh() else: raise ValueError('Not a valid activation, received {}'.format( activation)) def forward(self, x): x = self.conv(x) if self.bn is not None: x = self.bn(x) if self.dropout is not None: x = self.dropout(x) x = self.activation(x) return x class G_Small(nn.Module): def __init__(self): super(G_Small, self).__init__() self.encoder_1 = Conv2d(1, 64, 4, stride=2, bn=False, activation= 'leakyrelu', dropout=False) self.encoder_2 = Conv2d(64, 64, 4, stride=2, bn=False, activation= 'leakyrelu', dropout=False) self.encoder_3 = Conv2d(64, 64, 4, stride=2, bn=False, activation= 'leakyrelu', dropout=False) self.encoder_4 = Conv2d(64, 64, 4, stride=2, bn=False, activation= 'leakyrelu', dropout=False) self.encoder_5 = Conv2d(64, 64, 4, stride=2, bn=False, activation= 'leakyrelu', dropout=False) self.encoder_6 = Conv2d(64, 64, 4, stride=2, bn=False, activation= 'leakyrelu', dropout=False) self.encoder_7 = Conv2d(64, 64, 3, stride=1, bn=False, activation= 'leakyrelu', dropout=False) self.decoder_1 = Deconv2d(64, 64, 3, stride=1, bn=False, activation ='relu', dropout=True) self.decoder_2 = Deconv2d(128, 64, 4, stride=2, bn=False, activation='relu', dropout=True) self.decoder_3 = Deconv2d(128, 64, 4, stride=2, bn=False, activation='relu', dropout=True) self.decoder_4 = Deconv2d(128, 64, 4, stride=2, bn=False, activation='relu', dropout=False) self.decoder_5 = Deconv2d(128, 64, 4, stride=2, bn=False, activation='relu', dropout=False) self.decoder_6 = Deconv2d(128, 64, 4, stride=2, bn=False, activation='relu', dropout=False) self.decoder_7 = Deconv2d(128, 1, 4, stride=2, bn=False, activation ='relu', dropout=False) def forward(self, x): e1 = self.encoder_1(x) e2 = self.encoder_2(e1) e3 = self.encoder_3(e2) e4 = self.encoder_4(e3) e5 = self.encoder_5(e4) e6 = self.encoder_6(e5) e7 = self.encoder_7(e6) d = self.decoder_1(e7) d = torch.cat((d, e6), dim=1) d = self.decoder_2(d) d = torch.cat((d, e5), dim=1) d = self.decoder_3(d) d = torch.cat((d, e4), dim=1) d = self.decoder_4(d) d 
= torch.cat((d, e3), dim=1) d = self.decoder_5(d) d = torch.cat((d, e2), dim=1) d = self.decoder_6(d) d = torch.cat((d, e1), dim=1) d = self.decoder_7(d) return d def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
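# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original dump): the
# eager G_Small module above can be exercised directly with the helper
# inputs; lowering it through torch.compile is what yields Triton kernels
# like the ones in this file.
def _demo_eager_forward():
    import torch
    model = G_Small().eval()  # eval() disables the decoder dropout layers
    x, = get_inputs()  # a [4, 1, 64, 64] input, per the helper above
    with torch.no_grad():
        y = model(x)
    # six stride-2 encoder stages shrink 64 -> 1; six stride-2 decoder
    # stages restore 1 -> 64, with a single output channel
    assert y.shape == (4, 1, 64, 64)
    return y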
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 256 xnumel = 1024 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 1024 * y3), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (y0 + 64 * x2 + 65536 * y1), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + 64 * x2 + 65536 * y1), tmp7, xmask & ymask) @triton.jit def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) 
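    # (added commentary) tmp4 is the boolean mask (x > 0) saved for the
    # backward pass, and tmp7 is leaky_relu(x) = x if x > 0 else 0.2 * x,
    # where x = conv_output + per-channel bias.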
tl.store(out_ptr0 + x2, tmp4, None) tl.store(out_ptr1 + x2, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, None) tl.store(out_ptr1 + x2, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, None) tl.store(out_ptr1 + x2, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp15 = tl.load(in_ptr2 + (64 * x1 + (-64 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_cat_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) 
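    # (added commentary) x0 indexes the 128 output channels in the
    # channels-last layout: channels 0-63 take relu(deconv + bias) from
    # in_ptr0/in_ptr1, channels 64-127 copy the encoder skip from in_ptr2.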
tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp15 = tl.load(in_ptr2 + (64 * x1 + (-64 + x0)), tmp12, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, None) @triton.jit def triton_poi_fused_cat_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp15 = tl.load(in_ptr2 + (64 * x1 + (-64 + x0)), tmp12, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, None) @triton.jit def triton_poi_fused_cat_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp15 = tl.load(in_ptr2 + (64 * x1 + (-64 + x0)), tmp12, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, None) @triton.jit def triton_poi_fused_cat_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp15 = tl.load(in_ptr2 + (64 * x1 + (-64 + x0)), tmp12, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, None) @triton.jit def triton_poi_fused_cat_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp15 = tl.load(in_ptr2 + (64 * x1 + (-64 + x0)), tmp12, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_15(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = 0.0 tmp7 = tmp5 <= tmp6 tl.store(in_out_ptr0 + x0, tmp5, None) tl.store(out_ptr0 + x0, tmp7, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_17(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_18(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_19(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_20(in_ptr0, in_ptr1, out_ptr0, xnumel, 
XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_21(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29) = args args.clear() assert_size_stride(primals_1, (64, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (64, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (64, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_11, (64,), (1,)) assert_size_stride(primals_12, (64, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_13, (64,), (1,)) assert_size_stride(primals_14, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_15, (64,), (1,)) assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_17, (64,), (1,)) assert_size_stride(primals_18, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_19, (64,), (1,)) assert_size_stride(primals_20, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_21, (64,), (1,)) assert_size_stride(primals_22, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_23, (64,), (1,)) assert_size_stride(primals_24, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_25, (64,), (1,)) assert_size_stride(primals_26, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_27, (64,), (1,)) assert_size_stride(primals_28, (128, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_29, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64), torch .float32) get_raw_stream(0) triton_poi_fused_0[grid(4096, 16)](primals_4, buf0, 4096, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf1 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64), torch .float32) triton_poi_fused_0[grid(4096, 16)](primals_6, buf1, 4096, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf2 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64), torch .float32) triton_poi_fused_0[grid(4096, 16)](primals_8, buf2, 4096, 16, 
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf3 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64), torch .float32) triton_poi_fused_0[grid(4096, 16)](primals_10, buf3, 4096, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf4 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64), torch .float32) triton_poi_fused_0[grid(4096, 16)](primals_12, buf4, 4096, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf5 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch. float32) triton_poi_fused_1[grid(4096, 9)](primals_14, buf5, 4096, 9, XBLOCK =16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf6 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch. float32) triton_poi_fused_1[grid(4096, 9)](primals_16, buf6, 4096, 9, XBLOCK =16, YBLOCK=64, num_warps=4, num_stages=1) del primals_16 buf7 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) triton_poi_fused_2[grid(8192, 16)](primals_18, buf7, 8192, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_18 buf8 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) triton_poi_fused_2[grid(8192, 16)](primals_20, buf8, 8192, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_20 buf9 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) triton_poi_fused_2[grid(8192, 16)](primals_22, buf9, 8192, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_22 buf10 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) triton_poi_fused_2[grid(8192, 16)](primals_24, buf10, 8192, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_24 buf11 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) triton_poi_fused_2[grid(8192, 16)](primals_26, buf11, 8192, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_26 buf12 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf13 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.bool) buf14 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) triton_poi_fused_convolution_leaky_relu_3[grid(256, 1024)](buf12, primals_2, buf13, buf14, 256, 1024, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf12 del primals_2 buf15 = extern_kernels.convolution(buf14, buf0, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 64, 16, 16), (16384, 1, 1024, 64)) buf16 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.bool) buf17 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.float32) triton_poi_fused_convolution_leaky_relu_4[grid(65536)](buf15, primals_5, buf16, buf17, 65536, XBLOCK=512, num_warps=4, num_stages=1) del buf15 del primals_5 buf18 = extern_kernels.convolution(buf17, buf1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 64, 8, 8), (4096, 1, 512, 64)) buf19 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch .bool) buf20 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch .float32) triton_poi_fused_convolution_leaky_relu_5[grid(16384)](buf18, primals_7, buf19, buf20, 16384, XBLOCK=256, num_warps=4, num_stages=1) del buf18 del primals_7 buf21 = 
extern_kernels.convolution(buf20, buf2, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 64, 4, 4), (1024, 1, 256, 64)) buf22 = empty_strided_cuda((4, 64, 4, 4), (1024, 1, 256, 64), torch .bool) buf23 = empty_strided_cuda((4, 64, 4, 4), (1024, 1, 256, 64), torch .float32) triton_poi_fused_convolution_leaky_relu_6[grid(4096)](buf21, primals_9, buf22, buf23, 4096, XBLOCK=256, num_warps=4, num_stages=1) del buf21 del primals_9 buf24 = extern_kernels.convolution(buf23, buf3, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 64, 2, 2), (256, 1, 128, 64)) buf25 = empty_strided_cuda((4, 64, 2, 2), (256, 1, 128, 64), torch.bool ) buf26 = empty_strided_cuda((4, 64, 2, 2), (256, 1, 128, 64), torch. float32) triton_poi_fused_convolution_leaky_relu_7[grid(1024)](buf24, primals_11, buf25, buf26, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf24 del primals_11 buf27 = extern_kernels.convolution(buf26, buf4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 64, 1, 1), (64, 1, 64, 64)) buf28 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 64, 64), torch.bool) buf29 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 64, 64), torch. float32) triton_poi_fused_convolution_leaky_relu_8[grid(256)](buf27, primals_13, buf28, buf29, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_13 buf30 = extern_kernels.convolution(buf29, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 64, 1, 1), (64, 1, 64, 64)) buf31 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 64, 64), torch.bool) buf32 = buf27 del buf27 triton_poi_fused_convolution_leaky_relu_8[grid(256)](buf30, primals_15, buf31, buf32, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf30 del primals_15 buf33 = extern_kernels.convolution(buf32, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf33, (4, 64, 1, 1), (64, 1, 64, 64)) buf34 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 128, 128), torch.float32) triton_poi_fused_cat_9[grid(512)](buf33, primals_17, buf29, buf34, 512, XBLOCK=128, num_warps=4, num_stages=1) buf35 = extern_kernels.convolution(buf34, buf7, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 64, 2, 2), (256, 1, 128, 64)) buf36 = empty_strided_cuda((4, 128, 2, 2), (512, 1, 256, 128), torch.float32) triton_poi_fused_cat_10[grid(2048)](buf35, primals_19, buf26, buf36, 2048, XBLOCK=256, num_warps=4, num_stages=1) buf37 = extern_kernels.convolution(buf36, buf8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf37, (4, 64, 4, 4), (1024, 1, 256, 64)) buf38 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128), torch.float32) triton_poi_fused_cat_11[grid(8192)](buf37, primals_21, buf23, buf38, 8192, XBLOCK=256, num_warps=4, num_stages=1) buf39 = extern_kernels.convolution(buf38, buf9, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf39, (4, 64, 8, 8), (4096, 1, 512, 64)) buf40 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128), torch.float32) 
    triton_poi_fused_cat_12[grid(32768)](buf39, primals_23, buf20, buf40,
        32768, XBLOCK=128, num_warps=4, num_stages=1)
    buf41 = extern_kernels.convolution(buf40, buf10, stride=(2, 2),
        padding=(1, 1), dilation=(1, 1), transposed=True,
        output_padding=(0, 0), groups=1, bias=None)
    assert_size_stride(buf41, (4, 64, 16, 16), (16384, 1, 1024, 64))
    buf42 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128),
        torch.float32)
    triton_poi_fused_cat_13[grid(131072)](buf41, primals_25, buf17, buf42,
        131072, XBLOCK=512, num_warps=8, num_stages=1)
    buf43 = extern_kernels.convolution(buf42, buf11, stride=(2, 2),
        padding=(1, 1), dilation=(1, 1), transposed=True,
        output_padding=(0, 0), groups=1, bias=None)
    assert_size_stride(buf43, (4, 64, 32, 32), (65536, 1, 2048, 64))
    buf44 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128),
        torch.float32)
    triton_poi_fused_cat_14[grid(524288)](buf43, primals_27, buf14, buf44,
        524288, XBLOCK=512, num_warps=8, num_stages=1)
    buf45 = extern_kernels.convolution(buf44, primals_28, stride=(2, 2),
        padding=(1, 1), dilation=(1, 1), transposed=True,
        output_padding=(0, 0), groups=1, bias=None)
    assert_size_stride(buf45, (4, 1, 64, 64), (4096, 1, 64, 1))
    buf46 = reinterpret_tensor(buf45, (4, 1, 64, 64), (4096, 4096, 64, 1), 0)
    del buf45
    buf47 = empty_strided_cuda((4, 1, 64, 64), (4096, 1, 64, 1), torch.bool)
    triton_poi_fused_convolution_relu_threshold_backward_15[grid(16384)](
        buf46, primals_29, buf47, 16384, XBLOCK=256, num_warps=4,
        num_stages=1)
    del primals_29
    buf48 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
        torch.bool)
    triton_poi_fused_convolution_relu_threshold_backward_16[grid(262144)](
        buf43, primals_27, buf48, 262144, XBLOCK=1024, num_warps=4,
        num_stages=1)
    del buf43
    del primals_27
    buf49 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64),
        torch.bool)
    triton_poi_fused_convolution_relu_threshold_backward_17[grid(65536)](
        buf41, primals_25, buf49, 65536, XBLOCK=512, num_warps=4,
        num_stages=1)
    del buf41
    del primals_25
    buf50 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch.bool)
    triton_poi_fused_convolution_relu_threshold_backward_18[grid(16384)](
        buf39, primals_23, buf50, 16384, XBLOCK=256, num_warps=4,
        num_stages=1)
    del buf39
    del primals_23
    buf51 = empty_strided_cuda((4, 64, 4, 4), (1024, 1, 256, 64), torch.bool)
    triton_poi_fused_convolution_relu_threshold_backward_19[grid(4096)](
        buf37, primals_21, buf51, 4096, XBLOCK=128, num_warps=4,
        num_stages=1)
    del buf37
    del primals_21
    buf52 = empty_strided_cuda((4, 64, 2, 2), (256, 1, 128, 64), torch.bool)
    triton_poi_fused_convolution_relu_threshold_backward_20[grid(1024)](
        buf35, primals_19, buf52, 1024, XBLOCK=128, num_warps=4,
        num_stages=1)
    del buf35
    del primals_19
    buf53 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 64, 64), torch.bool)
    triton_poi_fused_convolution_relu_threshold_backward_21[grid(256)](
        buf33, primals_17, buf53, 256, XBLOCK=256, num_warps=4,
        num_stages=1)
    del buf33
    del primals_17
    return (buf46, primals_1, primals_3, buf0, buf1, buf2, buf3, buf4,
        buf5, buf6, buf7, buf8, buf9, buf10, buf11, primals_28, buf13,
        buf14, buf16, buf17, buf19, buf20, buf22, buf23, buf25, buf26,
        buf28, buf29, buf31, buf32, buf34, buf36, buf38, buf40, buf42,
        buf44, buf47, buf48, buf49, buf50, buf51, buf52, buf53)


import torch.nn as nn


class Conv2d(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        bn=False, activation='leakyrelu', dropout=False):
        super(Conv2d, self).__init__()
        padding = int((kernel_size - 1) / 2)
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
            stride, padding=padding)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0,
            affine=True) if bn else None
        self.dropout = nn.Dropout(p=0.5) if dropout else None
        if activation == 'leakyrelu':
            self.activation = nn.LeakyReLU(negative_slope=0.2)
        elif activation == 'relu':
            self.activation = nn.ReLU()
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        else:
            raise ValueError('Not a valid activation, received {}'.format(
                activation))

    def forward(self, x):
        x = self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.dropout is not None:
            x = self.dropout(x)
        x = self.activation(x)
        return x


class Deconv2d(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        bn=False, activation='leakyrelu', dropout=False):
        super(Deconv2d, self).__init__()
        padding = int((kernel_size - 1) / 2)
        self.conv = nn.ConvTranspose2d(in_channels, out_channels,
            kernel_size, stride, padding=padding)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0,
            affine=True) if bn else None
        self.dropout = nn.Dropout(p=0.5) if dropout else None
        if activation == 'leakyrelu':
            self.activation = nn.LeakyReLU(negative_slope=0.2)
        elif activation == 'relu':
            self.activation = nn.ReLU()
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        else:
            raise ValueError('Not a valid activation, received {}'.format(
                activation))

    def forward(self, x):
        x = self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.dropout is not None:
            x = self.dropout(x)
        x = self.activation(x)
        return x


class G_SmallNew(nn.Module):

    def __init__(self):
        super(G_SmallNew, self).__init__()
        self.encoder_1 = Conv2d(1, 64, 4, stride=2, bn=False,
            activation='leakyrelu', dropout=False)
        self.encoder_2 = Conv2d(64, 64, 4, stride=2, bn=False,
            activation='leakyrelu', dropout=False)
        self.encoder_3 = Conv2d(64, 64, 4, stride=2, bn=False,
            activation='leakyrelu', dropout=False)
        self.encoder_4 = Conv2d(64, 64, 4, stride=2, bn=False,
            activation='leakyrelu', dropout=False)
        self.encoder_5 = Conv2d(64, 64, 4, stride=2, bn=False,
            activation='leakyrelu', dropout=False)
        self.encoder_6 = Conv2d(64, 64, 4, stride=2, bn=False,
            activation='leakyrelu', dropout=False)
        self.encoder_7 = Conv2d(64, 64, 3, stride=1, bn=False,
            activation='leakyrelu', dropout=False)
        self.decoder_1 = Deconv2d(64, 64, 3, stride=1, bn=False,
            activation='relu', dropout=True)
        self.decoder_2 = Deconv2d(128, 64, 4, stride=2, bn=False,
            activation='relu', dropout=True)
        self.decoder_3 = Deconv2d(128, 64, 4, stride=2, bn=False,
            activation='relu', dropout=True)
        self.decoder_4 = Deconv2d(128, 64, 4, stride=2, bn=False,
            activation='relu', dropout=False)
        self.decoder_5 = Deconv2d(128, 64, 4, stride=2, bn=False,
            activation='relu', dropout=False)
        self.decoder_6 = Deconv2d(128, 64, 4, stride=2, bn=False,
            activation='relu', dropout=False)
        self.decoder_7 = Deconv2d(128, 1, 4, stride=2, bn=False,
            activation='relu', dropout=False)

    def forward(self, input_0):
        primals_1 = self.encoder_1.conv.weight
        primals_2 = self.encoder_1.conv.bias
        primals_4 = self.encoder_2.conv.weight
        primals_5 = self.encoder_2.conv.bias
        primals_6 = self.encoder_3.conv.weight
        primals_7 = self.encoder_3.conv.bias
        primals_8 = self.encoder_4.conv.weight
        primals_9 = self.encoder_4.conv.bias
        primals_10 = self.encoder_5.conv.weight
        primals_11 = self.encoder_5.conv.bias
        primals_12 = self.encoder_6.conv.weight
        primals_13 = self.encoder_6.conv.bias
        primals_14 = self.encoder_7.conv.weight
        primals_15 = self.encoder_7.conv.bias
        primals_16 = self.decoder_1.conv.weight
        primals_17 = self.decoder_1.conv.bias
        primals_18 = self.decoder_2.conv.weight
        primals_19 = self.decoder_2.conv.bias
        primals_20 = self.decoder_3.conv.weight
        primals_21 = self.decoder_3.conv.bias
        primals_22 = self.decoder_4.conv.weight
        primals_23 = self.decoder_4.conv.bias
        primals_24 = self.decoder_5.conv.weight
        primals_25 = self.decoder_5.conv.bias
        primals_26 = self.decoder_6.conv.weight
        primals_27 = self.decoder_6.conv.bias
        primals_28 = self.decoder_7.conv.weight
        primals_29 = self.decoder_7.conv.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22, primals_23, primals_24,
            primals_25, primals_26, primals_27, primals_28, primals_29])
        return output[0]
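The decoder path of G_SmallNew above is a U-Net-style skip connection: each triton_poi_fused_cat_* kernel fuses the bias add and ReLU on a transposed-convolution output with the channel concatenation of the matching encoder activation, while the *_threshold_backward_* kernels only record the boolean masks that the ReLU backward pass consumes. A minimal eager-mode sketch of one decoder step (the helper name decoder_step and its arguments are illustrative, not part of the generated code):

import torch
import torch.nn.functional as F


def decoder_step(dec_out, bias, skip):
    # What each fused cat kernel computes per spatial location:
    #   channels 0..63   -> relu(deconv output + bias)
    #   channels 64..127 -> the matching encoder activation (skip connection)
    activated = F.relu(dec_out + bias.view(1, -1, 1, 1))
    return torch.cat([activated, skip], dim=1)


# The *_threshold_backward_* kernels additionally store the mask
# (activated <= 0), which autograd uses to zero gradients in ReLU backward.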
repo_name: RQuispeC/pytorch-ACSCP
module_name: G_Small
synthetic: false
uuid: 8762
licenses: ["MIT"]
stars: 25
sha: c83f08632012c2245250ff9c5140814461db575c
repo_link: https://github.com/RQuispeC/pytorch-ACSCP/tree/c83f08632012c2245250ff9c5140814461db575c
entry_point: Net2
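The triton_code for this entry opens with layout-shuffle kernels (triton_poi_fused_0 through triton_poi_fused_4) that repack the convolution weights and the input tensor from contiguous NCHW strides into channels-last strides before the convolutions run, and then fuses each convolution's bias add with an ELU epilogue (the triton_poi_fused_convolution_elu_* kernels). A rough eager-mode equivalent, with shapes inferred from the kernel index arithmetic and w, x, bias as placeholder names:

import torch
import torch.nn.functional as F

w = torch.randn(64, 3, 3, 3)   # first conv weight, contiguous strides (27, 9, 3, 1)
x = torch.randn(4, 3, 64, 64)  # network input, contiguous strides (12288, 4096, 64, 1)

# triton_poi_fused_0 / triton_poi_fused_1 perform this permutation, giving
# channels-last strides (27, 1, 9, 3) and (12288, 1, 192, 3) respectively:
w_cl = w.to(memory_format=torch.channels_last)
x_cl = x.to(memory_format=torch.channels_last)

# triton_poi_fused_convolution_elu_5 then adds the bias and applies ELU with
# alpha = 1, matching the where(x > 0, x, expm1(x)) pattern in the kernel:
bias = torch.randn(64)
y = F.elu(F.conv2d(x_cl, w_cl, bias, padding=1))  # shape (4, 64, 64, 64)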
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/xq/cxqxvwhuevcb5oe7gsgfej3rmce7mdc6vqg3mkviccgugis2c7ro.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_1/inductor_cache/az/cazvf33aclbntgyixs3zlm6bdzs672xtry2xl6pc3l3sjzwrnks5.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 12 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/xs/cxsarnw2wm2gid2judloczqftyialh3etpmvbejw7tuglcm5m2ir.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/eg/cegkhpirbdizrfwpxf7z5iyj5fnizpk2ywd44ximmmde2qo6slhl.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 32768 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/7b/c7bbwyvole23x2v7i53wmwj7rky54jyyg54fpdfn5ds2jdfkoyaq.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', ''' import triton import 
triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 131072 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ar/carn2mg5lupz3lxons37n746n4rfmsv23xjvostd3syds3oglwp2.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d => convolution # x => expm1, gt, mul, mul_2, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {}) triton_poi_fused_convolution_elu_5 = async_compile.triton('triton_poi_fused_convolution_elu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + (x2), tmp2, None) tl.store(out_ptr0 + (x2), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/zv/czvqge5ahupojnvz2v7h44ldtkuavcrkodam2sukxqiix3phrxgp.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_6 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = (xindex // 64) % 32 x2 = (xindex // 2048) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (8192*x2)), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (8192*x2)), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + (128*x1) + (8192*x2)), None) tmp5 = tl.load(in_ptr0 + (4160 + x0 + (128*x1) + (8192*x2)), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/b2/cb2sfzroyoxchqx3cr3avag76mlmvkcxotnwxafekmxywxhzidux.py # Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_2 => expm1_1, gt_1, mul_3, mul_5, where_1 # Graph fragment: # %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {}) # %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 1.0), kwargs = {}) # %expm1_1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_3,), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_1, 1.0), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_3, %mul_5), kwargs = {}) triton_poi_fused_convolution_elu_7 = async_compile.triton('triton_poi_fused_convolution_elu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 
'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + (x2), tmp2, None) tl.store(out_ptr0 + (x2), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/5e/c5e3ctm73sqrl6erzojlu6nmx4xtjv6f6ujhufzvnjv5pwrl5p7r.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_3 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = (xindex // 128) % 16 x2 = (xindex // 2048) x3 = xindex tmp0 = 
tl.load(in_ptr0 + (x0 + (256*x1) + (8192*x2)), None) tmp1 = tl.load(in_ptr0 + (128 + x0 + (256*x1) + (8192*x2)), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + (256*x1) + (8192*x2)), None) tmp5 = tl.load(in_ptr0 + (4224 + x0 + (256*x1) + (8192*x2)), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/sz/cszm6vdzb6mb4b4ml3xizu45viqrla5l2yh5635oxdyjsdowmwr7.py # Topologically Sorted Source Nodes: [conv2d_2, x_4], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # x_4 => expm1_2, gt_2, mul_6, mul_8, where_2 # Graph fragment: # %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {}) # %mul_6 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 1.0), kwargs = {}) # %expm1_2 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_6,), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_2, 1.0), kwargs = {}) # %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %mul_6, %mul_8), kwargs = {}) triton_poi_fused_convolution_elu_9 = async_compile.triton('triton_poi_fused_convolution_elu_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_9(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + (x2), tmp2, None) tl.store(out_ptr0 + (x2), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/7f/c7fwneqy7a24hb73o3mp4pzr6b2pel5hiw6qfzsrpjjfjqigw5w7.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_5 => getitem_4, getitem_5 # Graph fragment: # %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_10 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x1 = (xindex // 256) % 8 x2 = (xindex // 2048) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (8192*x2)), None) tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (8192*x2)), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + (512*x1) + (8192*x2)), None) tmp5 = tl.load(in_ptr0 + (4352 + x0 + (512*x1) + (8192*x2)), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = 
tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/xn/cxnxyghtee3hfl6a74e2ey4rqjygwrln5zypgyxf5fseymwrpfj5.py # Topologically Sorted Source Nodes: [conv2d_3, x_6], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # x_6 => expm1_3, gt_3, mul_11, mul_9, where_3 # Graph fragment: # %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {}) # %mul_9 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 1.0), kwargs = {}) # %expm1_3 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_9,), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_3, 1.0), kwargs = {}) # %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %mul_9, %mul_11), kwargs = {}) triton_poi_fused_convolution_elu_11 = async_compile.triton('triton_poi_fused_convolution_elu_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_11(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + (x2), tmp2, None) tl.store(out_ptr0 + (x2), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/iq/ciqm5cll7gby6f2dxlejez3op7wo3nmsdaikqy6w7ahykcwusapd.py # 
Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_7 => _low_memory_max_pool2d_with_offsets_3, getitem_7 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_3 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%where_3, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_12 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 512], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 512 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y5 = yindex y4 = (yindex // 16) y6 = yindex % 16 tmp0 = tl.load(in_ptr0 + (x2 + (1024*y0) + (8192*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (512 + x2 + (1024*y0) + (8192*y1)), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4096 + x2 + (1024*y0) + (8192*y1)), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (4608 + x2 + (1024*y0) + (8192*y1)), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1, 1], 1, tl.int8) tmp4 = tl.full([1, 1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2 + (512*y5)), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + 
(16*x2) + (8192*y4)), tmp16, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/kb/ckblopawveaa4fcohzc5c56jfd2kkpiv3mymvxmp7uxqm7lgbede.py # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.elu] # Source node to ATen node mapping: # x_9 => expm1_4, gt_4, mul_12, mul_14, where_4 # Graph fragment: # %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%addmm, 0), kwargs = {}) # %mul_12 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm, 1.0), kwargs = {}) # %expm1_4 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_12,), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_4, 1.0), kwargs = {}) # %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %mul_12, %mul_14), kwargs = {}) triton_poi_fused_elu_13 = async_compile.triton('triton_poi_fused_elu_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_elu_13(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (x0), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/eu/ceuca5bl73qkpewrgv7wwewoaxw56rsqjdi7uetbctm4a76wn4pt.py # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.elu] # Source node to ATen node mapping: # x_10 => expm1_5, gt_5, mul_15, mul_17, where_5 # Graph fragment: # %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%addmm_1, 0), kwargs = {}) # %mul_15 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_1, 1.0), kwargs = {}) # %expm1_5 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_15,), kwargs = {}) # %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_5, 
1.0), kwargs = {}) # %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %mul_15, %mul_17), kwargs = {}) triton_poi_fused_elu_14 = async_compile.triton('triton_poi_fused_elu_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_elu_14(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (x0), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/b3/cb3k7rcbdtiij34c2ssloi7lqph2yffioahqqabe2xpr3c6ombhk.py # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.elu] # Source node to ATen node mapping: # x_11 => expm1_6, gt_6, mul_18, mul_20, where_6 # Graph fragment: # %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%addmm_2, 0), kwargs = {}) # %mul_18 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_2, 1.0), kwargs = {}) # %expm1_6 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_18,), kwargs = {}) # %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_6, 1.0), kwargs = {}) # %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %mul_18, %mul_20), kwargs = {}) triton_poi_fused_elu_15 = async_compile.triton('triton_poi_fused_elu_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, 
major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_elu_15(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (x0), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/f4/cf4hc3mjswsaoaaluwkaxixhchimowwuxv2mpymox445rjndcelt.py # Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.elu] # Source node to ATen node mapping: # x_12 => expm1_7, gt_7, mul_21, mul_23, where_7 # Graph fragment: # %gt_7 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%addmm_3, 0), kwargs = {}) # %mul_21 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_3, 1.0), kwargs = {}) # %expm1_7 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_21,), kwargs = {}) # %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_7, 1.0), kwargs = {}) # %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_7, %mul_21, %mul_23), kwargs = {}) triton_poi_fused_elu_16 = async_compile.triton('triton_poi_fused_elu_16', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_elu_16(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (x0), tmp7, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19 = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (128, ), (1, )) assert_size_stride(primals_6, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_7, (256, ), (1, )) assert_size_stride(primals_8, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_9, (512, ), (1, )) assert_size_stride(primals_10, (128, 2048), (2048, 1)) assert_size_stride(primals_11, (128, ), (1, )) assert_size_stride(primals_12, (256, 128), (128, 1)) assert_size_stride(primals_13, (256, ), (1, )) assert_size_stride(primals_14, (512, 256), (256, 1)) assert_size_stride(primals_15, (512, ), (1, )) assert_size_stride(primals_16, (1024, 512), (512, 1)) assert_size_stride(primals_17, (1024, ), (1, )) assert_size_stride(primals_18, (10, 1024), (1024, 1)) assert_size_stride(primals_19, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 192, 9, grid=grid(192, 9), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0) del primals_3 buf2 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_4, buf2, 8192, 9, grid=grid(8192, 9), stream=stream0) del primals_4 buf3 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_6, buf3, 32768, 9, grid=grid(32768, 9), stream=stream0) del primals_6 buf4 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_8, buf4, 131072, 9, grid=grid(131072, 9), stream=stream0) del primals_8 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf6 = buf5; del buf5 # reuse buf7 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.float32) # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: 
[aten.convolution, aten.elu] triton_poi_fused_convolution_elu_5.run(buf6, primals_2, buf7, 1048576, grid=grid(1048576), stream=stream0) del primals_2 buf8 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_6.run(buf7, buf8, buf9, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf8, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf11 = buf10; del buf10 # reuse buf12 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128), torch.float32) # Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.elu] triton_poi_fused_convolution_elu_7.run(buf11, primals_5, buf12, 524288, grid=grid(524288), stream=stream0) del primals_5 buf13 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32) buf14 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.int8) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_8.run(buf12, buf13, buf14, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf15 = extern_kernels.convolution(buf13, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf16 = buf15; del buf15 # reuse buf17 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) # Topologically Sorted Source Nodes: [conv2d_2, x_4], Original ATen: [aten.convolution, aten.elu] triton_poi_fused_convolution_elu_9.run(buf16, primals_7, buf17, 262144, grid=grid(262144), stream=stream0) del primals_7 buf18 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.float32) buf19 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.int8) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_10.run(buf17, buf18, buf19, 65536, grid=grid(65536), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf20 = extern_kernels.convolution(buf18, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf21 = buf20; del buf20 # reuse buf22 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) # Topologically Sorted Source Nodes: [conv2d_3, x_6], Original ATen: [aten.convolution, aten.elu] triton_poi_fused_convolution_elu_11.run(buf21, primals_9, buf22, 131072, grid=grid(131072), stream=stream0) del primals_9 buf23 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.int8) buf24 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_12.run(buf22, buf23, buf24, 64, 512, grid=grid(64, 512), stream=stream0) buf25 = empty_strided_cuda((16, 128), 
(128, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, reinterpret_tensor(buf24, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_10, (2048, 128), (1, 2048), 0), alpha=1, beta=1, out=buf25) del primals_11 buf26 = empty_strided_cuda((16, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.elu] triton_poi_fused_elu_13.run(buf25, buf26, 2048, grid=grid(2048), stream=stream0) buf27 = empty_strided_cuda((16, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_13, buf26, reinterpret_tensor(primals_12, (128, 256), (1, 128), 0), alpha=1, beta=1, out=buf27) del primals_13 buf28 = empty_strided_cuda((16, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.elu] triton_poi_fused_elu_14.run(buf27, buf28, 4096, grid=grid(4096), stream=stream0) buf29 = empty_strided_cuda((16, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_15, buf28, reinterpret_tensor(primals_14, (256, 512), (1, 256), 0), alpha=1, beta=1, out=buf29) del primals_15 buf30 = empty_strided_cuda((16, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.elu] triton_poi_fused_elu_15.run(buf29, buf30, 8192, grid=grid(8192), stream=stream0) buf31 = empty_strided_cuda((16, 1024), (1024, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_17, buf30, reinterpret_tensor(primals_16, (512, 1024), (1, 512), 0), alpha=1, beta=1, out=buf31) del primals_17 buf32 = empty_strided_cuda((16, 1024), (1024, 1), torch.float32) # Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.elu] triton_poi_fused_elu_16.run(buf31, buf32, 16384, grid=grid(16384), stream=stream0) buf33 = empty_strided_cuda((16, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.addmm] extern_kernels.addmm(primals_19, buf32, reinterpret_tensor(primals_18, (1024, 10), (1, 1024), 0), alpha=1, beta=1, out=buf33) del primals_19 return (buf33, buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf9, buf11, buf12, buf13, buf14, buf16, buf17, buf18, buf19, buf21, buf22, buf23, reinterpret_tensor(buf24, (16, 2048), (2048, 1), 0), buf25, buf26, buf27, buf28, buf29, buf30, buf31, buf32, primals_18, primals_16, primals_14, primals_12, primals_10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = 
rand_strided((128, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((256, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((512, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((1024, 512), (512, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((10, 1024), (1024, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
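# Editorial sketch (not emitted by Inductor): a minimal standalone driver for
# the compiled graph above. `call` consumes the flat list of weights, biases,
# and the input tensor, and returns the classifier logits first, followed by
# the saved activations and reinterpreted weight views kept for the backward
# pass. Assumes a CUDA device; contiguous tensors of these shapes satisfy the
# assert_size_stride guards.
def run_compiled_once():
    shapes = [(64, 3, 3, 3), (64,), (4, 3, 64, 64), (128, 64, 3, 3),
              (128,), (256, 128, 3, 3), (256,), (512, 256, 3, 3), (512,),
              (128, 2048), (128,), (256, 128), (256,), (512, 256), (512,),
              (1024, 512), (1024,), (10, 1024), (10,)]
    args = [torch.randn(s, device='cuda') for s in shapes]
    logits = call(args)[0]  # note: call() clears the argument list it is given
    # the (4, 3, 64, 64) input yields a 4x4x512 feature map per image, which
    # view(-1, 2048) folds into 4 rows each, hence a (16, 10) output
    assert logits.shape == (16, 10)
    return logits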
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net2(nn.Module):

    def __init__(self):
        super(Net2, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, 3, padding=1)
        self.conv2 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv3 = nn.Conv2d(128, 256, 3, padding=1)
        self.conv4 = nn.Conv2d(256, 512, 3, padding=1)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.pool3 = nn.MaxPool2d(2, 2)
        self.pool4 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(512 * 2 * 2, 128)
        self.fc2 = nn.Linear(128, 256)
        self.fc3 = nn.Linear(256, 512)
        self.fc4 = nn.Linear(512, 1024)
        self.fc5 = nn.Linear(1024, 10)

    def forward(self, x):
        x = F.elu(self.conv1(x))
        x = self.pool1(x)
        x = F.elu(self.conv2(x))
        x = self.pool2(x)
        x = F.elu(self.conv3(x))
        x = self.pool3(x)
        x = F.elu(self.conv4(x))
        x = self.pool4(x)
        x = x.view(-1, 512 * 2 * 2)
        x = F.elu(self.fc1(x))
        x = F.elu(self.fc2(x))
        x = F.elu(self.fc3(x))
        x = F.elu(self.fc4(x))
        x = self.fc5(x)
        return x

    def linear_layer_ids(self):
        return [12, 14, 16]

    def linear_layer_parameters(self):
        # Concatenate the parameters of every fully connected layer; the
        # original `a or b or ...` between generators always short-circuited
        # to fc1's parameters alone, since generator objects are truthy.
        linear1 = torch.cat([p.view(-1)
            for layer in (self.fc1, self.fc2, self.fc3, self.fc4, self.fc5)
            for p in layer.parameters()])
        return linear1

    def train_order_block_ids(self):
        return [[14, 15], [4, 5], [2, 3], [8, 9], [16, 17], [12, 13],
            [6, 7], [0, 1], [10, 11]]


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
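# Editorial sketch (assumed harness, not from the source repo): checks the
# eager Net2 above against the Inductor-compiled Net2New defined further
# below. The fused ELU/addmm kernels may reorder floating-point math, so the
# comparison uses loose tolerances. Assumes a CUDA device.
def check_against_compiled():
    torch.manual_seed(0)
    eager = Net2().cuda().eval()
    compiled = Net2New().cuda().eval()
    compiled.load_state_dict(eager.state_dict())  # identical parameter names
    x = get_inputs()[0].cuda()
    with torch.no_grad():
        ref, out = eager(x), compiled(x)
    assert torch.allclose(ref, out, atol=1e-4, rtol=1e-4)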
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_elu_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x2, tmp2, None) tl.store(out_ptr0 + x2, tmp9, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 % 32 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_elu_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x2, tmp2, None) tl.store(out_ptr0 + x2, tmp9, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 % 16 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 256 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4224 + x0 + 256 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) 
tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_elu_9(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x2, tmp2, None) tl.store(out_ptr0 + x2, tmp9, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x1 = xindex // 256 % 8 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4352 + x0 + 512 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_elu_11(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x2, tmp2, None) tl.store(out_ptr0 + x2, tmp9, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 512 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y5 = yindex y4 = yindex // 16 y6 = yindex % 16 tmp0 = tl.load(in_ptr0 + (x2 + 1024 * y0 + 8192 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (512 + x2 + 1024 * y0 + 8192 * y1), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4096 + x2 + 1024 * y0 + 8192 * y1), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (4608 + x2 + 1024 * y0 + 8192 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1, 1], 1, tl.int8) tmp4 = tl.full([1, 1], 0, tl.int8) tmp5 = 
tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2 + 512 * y5), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + 16 * x2 + 8192 * y4), tmp16, xmask & ymask) @triton.jit def triton_poi_fused_elu_13(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, None) @triton.jit def triton_poi_fused_elu_14(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, None) @triton.jit def triton_poi_fused_elu_15(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, None) @triton.jit def triton_poi_fused_elu_16(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19) = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (128, 2048), (2048, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (256, 128), (128, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (512, 256), (256, 1)) assert_size_stride(primals_15, (512,), (1,)) assert_size_stride(primals_16, (1024, 512), (512, 1)) assert_size_stride(primals_17, (1024,), (1,)) assert_size_stride(primals_18, (10, 1024), (1024, 1)) assert_size_stride(primals_19, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32) 
get_raw_stream(0) triton_poi_fused_0[grid(192, 9)](primals_1, buf0, 192, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_2[grid(8192, 9)](primals_4, buf2, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_3[grid(32768, 9)](primals_6, buf3, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_4[grid(131072, 9)](primals_8, buf4, 131072, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf6 = buf5 del buf5 buf7 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.float32) triton_poi_fused_convolution_elu_5[grid(1048576)](buf6, primals_2, buf7, 1048576, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf8 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_6[grid(262144)](buf7, buf8, buf9, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf10 = extern_kernels.convolution(buf8, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf11 = buf10 del buf10 buf12 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128), torch.float32) triton_poi_fused_convolution_elu_7[grid(524288)](buf11, primals_5, buf12, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf13 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32) buf14 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.int8) triton_poi_fused_max_pool2d_with_indices_8[grid(131072)](buf12, buf13, buf14, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf15 = extern_kernels.convolution(buf13, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf16 = buf15 del buf15 buf17 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_convolution_elu_9[grid(262144)](buf16, primals_7, buf17, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf18 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.float32) buf19 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.int8) triton_poi_fused_max_pool2d_with_indices_10[grid(65536)](buf17, buf18, buf19, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf20 = extern_kernels.convolution(buf18, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf21 = buf20 del buf20 buf22 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) 
triton_poi_fused_convolution_elu_11[grid(131072)](buf21, primals_9,
            buf22, 131072, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_9
        buf23 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512),
            torch.int8)
        buf24 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1),
            torch.float32)
        triton_poi_fused_max_pool2d_with_indices_12[grid(64, 512)](buf22,
            buf23, buf24, 64, 512, XBLOCK=4, YBLOCK=64, num_warps=4,
            num_stages=1)
        buf25 = empty_strided_cuda((16, 128), (128, 1), torch.float32)
        extern_kernels.addmm(primals_11, reinterpret_tensor(buf24, (16,
            2048), (2048, 1), 0), reinterpret_tensor(primals_10, (2048,
            128), (1, 2048), 0), alpha=1, beta=1, out=buf25)
        del primals_11
        buf26 = empty_strided_cuda((16, 128), (128, 1), torch.float32)
        triton_poi_fused_elu_13[grid(2048)](buf25, buf26, 2048, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf27 = empty_strided_cuda((16, 256), (256, 1), torch.float32)
        extern_kernels.addmm(primals_13, buf26, reinterpret_tensor(
            primals_12, (128, 256), (1, 128), 0), alpha=1, beta=1, out=buf27)
        del primals_13
        buf28 = empty_strided_cuda((16, 256), (256, 1), torch.float32)
        triton_poi_fused_elu_14[grid(4096)](buf27, buf28, 4096, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf29 = empty_strided_cuda((16, 512), (512, 1), torch.float32)
        extern_kernels.addmm(primals_15, buf28, reinterpret_tensor(
            primals_14, (256, 512), (1, 256), 0), alpha=1, beta=1, out=buf29)
        del primals_15
        buf30 = empty_strided_cuda((16, 512), (512, 1), torch.float32)
        triton_poi_fused_elu_15[grid(8192)](buf29, buf30, 8192, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf31 = empty_strided_cuda((16, 1024), (1024, 1), torch.float32)
        extern_kernels.addmm(primals_17, buf30, reinterpret_tensor(
            primals_16, (512, 1024), (1, 512), 0), alpha=1, beta=1, out=buf31)
        del primals_17
        buf32 = empty_strided_cuda((16, 1024), (1024, 1), torch.float32)
        triton_poi_fused_elu_16[grid(16384)](buf31, buf32, 16384, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf33 = empty_strided_cuda((16, 10), (10, 1), torch.float32)
        extern_kernels.addmm(primals_19, buf32, reinterpret_tensor(
            primals_18, (1024, 10), (1, 1024), 0), alpha=1, beta=1, out=buf33)
        del primals_19
    return (buf33, buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf9,
        buf11, buf12, buf13, buf14, buf16, buf17, buf18, buf19, buf21,
        buf22, buf23, reinterpret_tensor(buf24, (16, 2048), (2048, 1), 0),
        buf25, buf26, buf27, buf28, buf29, buf30, buf31, buf32, primals_18,
        primals_16, primals_14, primals_12, primals_10)


class Net2New(nn.Module):

    def __init__(self):
        super(Net2New, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, 3, padding=1)
        self.conv2 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv3 = nn.Conv2d(128, 256, 3, padding=1)
        self.conv4 = nn.Conv2d(256, 512, 3, padding=1)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.pool3 = nn.MaxPool2d(2, 2)
        self.pool4 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(512 * 2 * 2, 128)
        self.fc2 = nn.Linear(128, 256)
        self.fc3 = nn.Linear(256, 512)
        self.fc4 = nn.Linear(512, 1024)
        self.fc5 = nn.Linear(1024, 10)

    def linear_layer_ids(self):
        return [12, 14, 16]

    def linear_layer_parameters(self):
        # Concatenate the parameters of every fully connected layer; the
        # original `a or b or ...` between generators always short-circuited
        # to fc1's parameters alone, since generator objects are truthy.
        linear1 = torch.cat([p.view(-1)
            for layer in (self.fc1, self.fc2, self.fc3, self.fc4, self.fc5)
            for p in layer.parameters()])
        return linear1

    def train_order_block_ids(self):
        return [[14, 15], [4, 5], [2, 3], [8, 9], [16, 17], [12, 13],
            [6, 7], [0, 1], [10, 11]]

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.conv3.weight
        primals_7 = self.conv3.bias
        primals_8 = self.conv4.weight
        primals_9 = self.conv4.bias
        primals_10 = self.fc1.weight
        primals_11 = self.fc1.bias
        primals_12 = self.fc2.weight
        primals_13 = self.fc2.bias
        primals_14 = self.fc3.weight
        primals_15 = self.fc3.bias
        primals_16 = self.fc4.weight
        primals_17 = self.fc4.bias
        primals_18 = self.fc5.weight
        primals_19 = self.fc5.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19])
        return output[0]
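# Editorial smoke test (assumption: CUDA device). Net2New registers the same
# parameter layout as Net2, so checkpoints transfer directly between the two;
# forward() simply reroutes those parameters through the compiled call() graph.
def smoke_test():
    model = Net2New().cuda().eval()
    x = torch.rand(4, 3, 64, 64, device='cuda')
    with torch.no_grad():
        logits = model(x)
    # view(-1, 2048) folds each image's 4x4x512 feature map into 4 rows,
    # so a batch of 4 images produces (16, 10) logits
    assert logits.shape == (16, 10)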
SarodYatawatta/federated-pytorch-test
Net2
false
8763
[ "Apache-2.0" ]
33
42a51ba12a92b32fa19273340d5b61e74e11d8e0
https://github.com/SarodYatawatta/federated-pytorch-test/tree/42a51ba12a92b32fa19273340d5b61e74e11d8e0
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/ql/cqlctlr27pgl2rxfxsk7kvof2ap4g5pxqdvo6phu4wgvkcyym6ys.py # Topologically Sorted Source Nodes: [conv2d, elu], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d => convolution # elu => expm1, gt, mul, mul_2, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {}) triton_poi_fused_convolution_elu_0 = async_compile.triton('triton_poi_fused_convolution_elu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 18816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 784) % 6 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/di/cdilhsraowpc4455uuolbkzkppabfggt7uygpzwf5542ztj6h4sc.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x3 = (xindex // 14) x2 = (xindex // 1176) x4 = xindex % 1176 tmp0 = tl.load(in_ptr0 + ((2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (28 + (2*x0) + (56*x3)), xmask, 
eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + (1184*x2)), tmp6, xmask) tl.store(out_ptr1 + (x4 + (1280*x2)), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/aw/cawy57b4rygr2q24uj4fcx3azr7huwdkhi76iwjlfitfs3ggbxja.py # Topologically Sorted Source Nodes: [conv2d_1, elu_1], Original ATen: [aten.convolution, aten.elu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # elu_1 => expm1_1, gt_1, mul_3, mul_5, where_1 # Graph fragment: # %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {}) # %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 1.0), kwargs = {}) # %expm1_1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_3,), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_1, 1.0), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_3, %mul_5), kwargs = {}) triton_poi_fused_convolution_elu_2 = async_compile.triton('triton_poi_fused_convolution_elu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 100) 
% 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/22/c22xudorbl4i7pd6ur5mccol5iaivhxawnvbofzq2iyziy74773n.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%where_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = (xindex // 5) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (10 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, 
tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2), tmp15, xmask) tl.store(out_ptr1 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ie/cie7ia2kvvjlxht6x2s43ltsixabssmffe35wfxp5lrqdnhjgmsc.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.elu] # Source node to ATen node mapping: # x_3 => expm1_2, gt_2, mul_6, mul_8, where_2 # Graph fragment: # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%addmm, 0), kwargs = {}) # %mul_6 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm, 1.0), kwargs = {}) # %expm1_2 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_6,), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_2, 1.0), kwargs = {}) # %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %mul_6, %mul_8), kwargs = {}) triton_poi_fused_elu_4 = async_compile.triton('triton_poi_fused_elu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_elu_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/kz/ckzez4xyth27v5wssy5maidngbwpz56upvrj5udyv3q2swq5m33u.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.elu] # Source node to ATen node mapping: # x_4 => expm1_3, gt_3, mul_11, mul_9, where_3 # Graph fragment: # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%addmm_1, 0), kwargs = {}) # %mul_9 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_1, 1.0), kwargs = {}) # %expm1_3 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_9,), kwargs = {}) # %mul_11 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_3, 1.0), kwargs = {}) # %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %mul_9, %mul_11), kwargs = {}) triton_poi_fused_elu_5 = async_compile.triton('triton_poi_fused_elu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_elu_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 336 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (6, ), (1, )) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_5, (16, ), (1, )) assert_size_stride(primals_6, (120, 400), (400, 1)) assert_size_stride(primals_7, (120, ), (1, )) assert_size_stride(primals_8, (84, 120), (120, 1)) assert_size_stride(primals_9, (84, ), (1, )) assert_size_stride(primals_10, (10, 84), (84, 1)) assert_size_stride(primals_11, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 6, 28, 28), (4704, 784, 28, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, elu], Original ATen: [aten.convolution, aten.elu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_elu_0.run(buf1, primals_2, buf2, 
18816, grid=grid(18816), stream=stream0) del primals_2 buf3 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch.float32) buf4 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch.int8) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf2, buf3, buf4, 4704, grid=grid(4704), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 16, 10, 10), (1600, 100, 10, 1)) buf6 = buf5; del buf5 # reuse buf7 = empty_strided_cuda((4, 16, 10, 10), (1600, 100, 10, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_1, elu_1], Original ATen: [aten.convolution, aten.elu] triton_poi_fused_convolution_elu_2.run(buf6, primals_5, buf7, 6400, grid=grid(6400), stream=stream0) del primals_5 buf8 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8) buf9 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf7, buf8, buf9, 1600, grid=grid(1600), stream=stream0) buf10 = empty_strided_cuda((4, 120), (120, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf9, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), alpha=1, beta=1, out=buf10) del primals_7 buf11 = empty_strided_cuda((4, 120), (120, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.elu] triton_poi_fused_elu_4.run(buf10, buf11, 480, grid=grid(480), stream=stream0) buf12 = empty_strided_cuda((4, 84), (84, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf11, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), alpha=1, beta=1, out=buf12) del primals_9 buf13 = empty_strided_cuda((4, 84), (84, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.elu] triton_poi_fused_elu_5.run(buf12, buf13, 336, grid=grid(336), stream=stream0) buf14 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, buf13, reinterpret_tensor(primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf14) del primals_11 return (buf14, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf4, buf6, buf7, buf8, reinterpret_tensor(buf9, (4, 400), (400, 1), 0), buf10, buf11, buf12, buf13, primals_10, primals_8, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((6, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 32, 32), (3072, 1024, 32, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((120, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((120, 
), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((84, 120), (120, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((84, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((10, 84), (84, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
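A note on the activation math in this record: every fused ELU kernel above inlines the same scalar formula, ELU(x) = x for x > 0 and expm1(x) otherwise (alpha = scale = 1.0), which is exactly the x > 0 test followed by libdevice.expm1 and tl.where. A minimal eager-mode sketch, not part of the generated module, that checks this identity against torch.nn.functional.elu:

import torch
import torch.nn.functional as F

x = torch.randn(1024)
# where(x > 0, 1.0 * x, 1.0 * expm1(1.0 * x)) is what the fused kernels compute
fused = torch.where(x > 0, x, torch.expm1(x))
assert torch.allclose(fused, F.elu(x, alpha=1.0))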
import torch import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): x = self.pool(F.elu(self.conv1(x))) x = self.pool(F.elu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) x = F.elu(self.fc1(x)) x = F.elu(self.fc2(x)) x = self.fc3(x) return x def linear_layer_ids(self): return [4, 6, 8] def linear_layer_parameters(self): linear1 = torch.cat([x.view(-1) for x in list(self.fc1.parameters()) + list(self.fc2.parameters()) + list(self.fc3.parameters())]) return linear1 def train_order_block_ids(self): return [[4, 5], [0, 1], [2, 3], [6, 7], [8, 9]] def get_inputs(): return [torch.rand([4, 3, 32, 32])] def get_init_inputs(): return [[], {}]
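The xnumel constants in the kernels above follow directly from this forward pass. A hypothetical shape trace, assuming the Net class just defined is in scope, that reproduces the buffer sizes seen in call():

import torch
import torch.nn.functional as F

net = Net()
x = torch.rand(4, 3, 32, 32)
h = net.pool(F.elu(net.conv1(x)))  # conv1 (5x5): 32 -> 28, so 4*6*28*28 = 18816 elements pre-pool; pool: 28 -> 14
h = net.pool(F.elu(net.conv2(h)))  # conv2 (5x5): 14 -> 10, so 4*16*10*10 = 6400 elements pre-pool; pool: 10 -> 5
assert h.shape == (4, 16, 5, 5)    # 1600 elements, flattened to (4, 400) for fc1
assert net(x).shape == (4, 10)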
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_elu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 18816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 784 % 6 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x3 = xindex // 14 x2 = xindex // 1176 x4 = xindex % 1176 tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask) tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_elu_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 100 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 
* x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_elu_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_elu_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 336 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (6,), (1,)) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (120, 400), (400, 1)) assert_size_stride(primals_7, (120,), (1,)) assert_size_stride(primals_8, (84, 120), (120, 1)) assert_size_stride(primals_9, (84,), (1,)) assert_size_stride(primals_10, (10, 84), (84, 1)) assert_size_stride(primals_11, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 6, 28, 28), (4704, 784, 28, 1), torch .float32) get_raw_stream(0) triton_poi_fused_convolution_elu_0[grid(18816)](buf1, primals_2, buf2, 18816, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch .float32) buf4 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf2, buf3, buf4, 4704, XBLOCK=128, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 16, 10, 10), (1600, 100, 10, 1)) buf6 = buf5 del buf5 buf7 = empty_strided_cuda((4, 16, 10, 10), (1600, 100, 10, 1), torch.float32) triton_poi_fused_convolution_elu_2[grid(6400)](buf6, primals_5, buf7, 6400, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf8 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8) buf9 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32 ) 
triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf7, buf8, buf9, 1600, XBLOCK=128, num_warps=4, num_stages=1) buf10 = empty_strided_cuda((4, 120), (120, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf9, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400 ), 0), alpha=1, beta=1, out=buf10) del primals_7 buf11 = empty_strided_cuda((4, 120), (120, 1), torch.float32) triton_poi_fused_elu_4[grid(480)](buf10, buf11, 480, XBLOCK=256, num_warps=4, num_stages=1) buf12 = empty_strided_cuda((4, 84), (84, 1), torch.float32) extern_kernels.addmm(primals_9, buf11, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), alpha=1, beta=1, out=buf12) del primals_9 buf13 = empty_strided_cuda((4, 84), (84, 1), torch.float32) triton_poi_fused_elu_5[grid(336)](buf12, buf13, 336, XBLOCK=256, num_warps=4, num_stages=1) buf14 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_11, buf13, reinterpret_tensor( primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf14) del primals_11 return (buf14, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf4, buf6, buf7, buf8, reinterpret_tensor(buf9, (4, 400), (400, 1), 0), buf10, buf11, buf12, buf13, primals_10, primals_8, primals_6) class NetNew(nn.Module): def __init__(self): super(NetNew, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def linear_layer_ids(self): return [4, 6, 8] def linear_layer_parameters(self): linear1 = torch.cat([x.view(-1) for x in list(self.fc1.parameters()) + list(self.fc2.parameters()) + list(self.fc3.parameters())]) return linear1 def train_order_block_ids(self): return [[4, 5], [0, 1], [2, 3], [6, 7], [8, 9]] def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.fc3.weight primals_11 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
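A hypothetical parity check between the eager Net and the compiled NetNew, both defined in this record; it assumes a CUDA device, since call() pins all buffers to cuda:0:

import torch

torch.manual_seed(0)
eager, compiled = Net().cuda(), NetNew().cuda()
compiled.load_state_dict(eager.state_dict())  # identical submodule names, so the weights transfer directly
x = torch.rand(4, 3, 32, 32, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x), compiled(x), atol=1e-5)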
SarodYatawatta/federated-pytorch-test
Net
false
8764
[ "Apache-2.0" ]
33
42a51ba12a92b32fa19273340d5b61e74e11d8e0
https://github.com/SarodYatawatta/federated-pytorch-test/tree/42a51ba12a92b32fa19273340d5b61e74e11d8e0
ContextgenCNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/qi/cqi3gcnggm7n2eogjvogbs2d42nztjvpa3tgbc7bn2ipfavwmjz2.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4096 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 1024 y1 = (yindex // 1024) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), None) tl.store(out_ptr0 + (y0 + (1024*x2) + (4194304*y1)), tmp0, None) ''', 
device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/o4/co4pmatq2ocbyjariiddjrqof4cpeh7oi3ha4nqoeswhmgbzaqnl.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 65536 xnumel = 4 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (4*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (1024*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/pc/cpc5av7evu6xvhb2ticnbho3aq5o6wabu5ksco5gs5jm4rmvftsj.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 
'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 131072 xnumel = 4 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (4*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (1024*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/oa/coammjpv2csnxmwiqfy6zhufbm23jpgfgqdsxzcavwsqtf63wgcc.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.elu] # Source node to ATen node mapping: # x => expm1, gt, mul, mul_2, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {}) triton_poi_fused_elu_3 = async_compile.triton('triton_poi_fused_elu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_elu_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4194304 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (x0), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ep/cepafn2cmexfhyf74g4uldbv5gw7c4t3yh4amb74kkzghsmxxqvy.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.elu] # Source node to ATen node mapping: # x_1 => expm1_1, gt_1, mul_3, mul_5, where_1 # Graph fragment: # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {}) # %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 1.0), kwargs = {}) # %expm1_1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_3,), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_1, 1.0), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_3, %mul_5), kwargs = {}) triton_poi_fused_elu_4 = async_compile.triton('triton_poi_fused_elu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_elu_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4326400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/6d/c6d6pmtogrlleffm7ubss4yha327didq3khnthkzunzbjm42ivlw.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.elu] # Source node to ATen node mapping: # x_2 => expm1_2, gt_2, mul_6, mul_8, where_2 # Graph fragment: # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {}) # %mul_6 : [num_users=2] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 1.0), kwargs = {}) # %expm1_2 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_6,), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_2, 1.0), kwargs = {}) # %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %mul_6, %mul_8), kwargs = {}) triton_poi_fused_elu_5 = async_compile.triton('triton_poi_fused_elu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_elu_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8388608 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (x0), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/me/cmekr5qqpwwp5ufo36znsoguecph5kfrkfhcb2fynddks7tbj2ez.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.elu] # Source node to ATen node mapping: # x_3 => expm1_3, gt_3, mul_11, mul_9, where_3 # Graph fragment: # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {}) # %mul_9 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 1.0), kwargs = {}) # %expm1_3 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_9,), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_3, 1.0), kwargs = {}) # %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %mul_9, %mul_11), kwargs = {}) triton_poi_fused_elu_6 = async_compile.triton('triton_poi_fused_elu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384, 1024], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_elu_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16384 xnumel = 1024 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4096 y1 = (yindex // 4096) tmp0 = tl.load(in_ptr0 + (x2 + (1024*y3)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (y0 + (4096*x2) + (4194304*y1)), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_2, (4, 1024, 64, 64), (4194304, 4096, 64, 1)) assert_size_stride(primals_3, (256, 256, 2, 2), (1024, 4, 2, 1)) assert_size_stride(primals_4, (512, 256, 2, 2), (1024, 4, 2, 1)) assert_size_stride(primals_5, (1024, 512, 1, 1), (512, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1024, 64, 64), (4194304, 1, 65536, 1024), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_2, buf0, 4096, 4096, grid=grid(4096, 4096), stream=stream0) del primals_2 buf1 = empty_strided_cuda((256, 256, 2, 2), (1024, 1, 512, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 65536, 4, grid=grid(65536, 4), stream=stream0) del primals_3 buf2 = empty_strided_cuda((512, 256, 2, 2), (1024, 1, 512, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_4, buf2, 131072, 4, grid=grid(131072, 4), stream=stream0) del primals_4 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf4 = empty_strided_cuda((4, 256, 64, 64), 
(1048576, 1, 16384, 256), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.elu] triton_poi_fused_elu_3.run(buf3, buf4, 4194304, grid=grid(4194304), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 256, 65, 65), (1081600, 1, 16640, 256)) buf6 = empty_strided_cuda((4, 256, 65, 65), (1081600, 1, 16640, 256), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.elu] triton_poi_fused_elu_4.run(buf5, buf6, 4326400, grid=grid(4326400), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(buf6, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 512, 64, 64), (2097152, 1, 32768, 512)) buf8 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.elu] triton_poi_fused_elu_5.run(buf7, buf8, 8388608, grid=grid(8388608), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(buf8, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 1024, 64, 64), (4194304, 1, 65536, 1024)) buf10 = empty_strided_cuda((4, 1024, 64, 64), (4194304, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.elu] triton_poi_fused_elu_6.run(buf9, buf10, 16384, 1024, grid=grid(16384, 1024), stream=stream0) return (buf10, primals_1, buf0, buf1, buf2, primals_5, buf3, buf4, buf5, buf6, buf7, buf8, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1024, 64, 64), (4194304, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((256, 256, 2, 2), (1024, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((512, 256, 2, 2), (1024, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1024, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
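The layout kernels at the top of call() (triton_poi_fused_0/1/2, annotated with empty source nodes) perform no arithmetic: they materialize channels-last copies of the input activation and the 2x2 convolution weights so the convolutions run in NHWC; the 1x1 weights have trailing dimensions of size one, so they need no copy. A minimal sketch of the stride mapping triton_poi_fused_0 implements for the input:

import torch

x = torch.rand(4, 1024, 64, 64)                  # contiguous NCHW strides: (4194304, 4096, 64, 1)
cl = x.to(memory_format=torch.channels_last)
assert cl.stride() == (4194304, 1, 65536, 1024)  # the strides of buf0 in call()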
import torch import torch.nn as nn import torch.nn.functional as F class ContextgenCNN(nn.Module): def __init__(self, latent_dim=1024): super(ContextgenCNN, self).__init__() self.latent_dim = latent_dim self.conv1 = nn.Conv2d(self.latent_dim, self.latent_dim // 4, 1, stride=1, padding=0, bias=False) self.conv2 = nn.Conv2d(self.latent_dim // 4, self.latent_dim // 4, 2, stride=1, padding=1, bias=False) self.conv3 = nn.Conv2d(self.latent_dim // 4, self.latent_dim // 2, 2, stride=1, padding=0, bias=False) self.conv4 = nn.Conv2d(self.latent_dim // 2, self.latent_dim, 1, stride=1, padding=0, bias=False) def forward(self, x): x = F.elu(self.conv1(x)) x = F.elu(self.conv2(x)) x = F.elu(self.conv3(x)) x = F.elu(self.conv4(x)) return x def train_order_block_ids(self): return [[0, 3]] def get_inputs(): return [torch.rand([4, 1024, 64, 64])] def get_init_inputs(): return [[], {}]
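The odd 4326400-element buffer in the generated code comes from conv2's geometry: a 2x2 kernel with padding 1 grows each spatial side from 64 to 64 + 2 - 2 + 1 = 65, and conv3's unpadded 2x2 kernel shrinks it back to 64. A hypothetical trace, assuming the ContextgenCNN class above is in scope:

import torch

m = ContextgenCNN(latent_dim=1024)
x = torch.rand(4, 1024, 64, 64)
h = m.conv2(m.conv1(x))
assert h.shape == (4, 256, 65, 65)      # 4*256*65*65 = 4326400, the xnumel of triton_poi_fused_elu_4
assert m(x).shape == (4, 1024, 64, 64)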
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 1024 y1 = yindex // 1024 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None) tl.store(out_ptr0 + (y0 + 1024 * x2 + 4194304 * y1), tmp0, None) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 4 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 1024 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 4 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 1024 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_elu_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, None) @triton.jit def triton_poi_fused_elu_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4326400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_elu_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, None) @triton.jit def triton_poi_fused_elu_6(in_ptr0, out_ptr0, 
ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 1024 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4096 y1 = yindex // 4096 tmp0 = tl.load(in_ptr0 + (x2 + 1024 * y3), xmask, eviction_policy= 'evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (y0 + 4096 * x2 + 4194304 * y1), tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_2, (4, 1024, 64, 64), (4194304, 4096, 64, 1)) assert_size_stride(primals_3, (256, 256, 2, 2), (1024, 4, 2, 1)) assert_size_stride(primals_4, (512, 256, 2, 2), (1024, 4, 2, 1)) assert_size_stride(primals_5, (1024, 512, 1, 1), (512, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1024, 64, 64), (4194304, 1, 65536, 1024), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(4096, 4096)](primals_2, buf0, 4096, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_2 buf1 = empty_strided_cuda((256, 256, 2, 2), (1024, 1, 512, 256), torch.float32) triton_poi_fused_1[grid(65536, 4)](primals_3, buf1, 65536, 4, XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((512, 256, 2, 2), (1024, 1, 512, 256), torch.float32) triton_poi_fused_2[grid(131072, 4)](primals_4, buf2, 131072, 4, XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1) del primals_4 buf3 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf4 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256 ), torch.float32) triton_poi_fused_elu_3[grid(4194304)](buf3, buf4, 4194304, XBLOCK= 1024, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(buf4, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 256, 65, 65), (1081600, 1, 16640, 256)) buf6 = empty_strided_cuda((4, 256, 65, 65), (1081600, 1, 16640, 256 ), torch.float32) triton_poi_fused_elu_4[grid(4326400)](buf5, buf6, 4326400, XBLOCK= 1024, num_warps=4, num_stages=1) buf7 = extern_kernels.convolution(buf6, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 512, 64, 64), (2097152, 1, 32768, 512)) buf8 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512 ), torch.float32) triton_poi_fused_elu_5[grid(8388608)](buf7, buf8, 8388608, XBLOCK= 1024, num_warps=4, num_stages=1) buf9 = extern_kernels.convolution(buf8, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 1024, 64, 64), (4194304, 1, 65536, 1024)) buf10 = empty_strided_cuda((4, 1024, 64, 64), (4194304, 4096, 64, 1 ), torch.float32) triton_poi_fused_elu_6[grid(16384, 1024)](buf9, buf10, 16384, 1024, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) return (buf10, primals_1, buf0, buf1, buf2, primals_5, buf3, buf4, buf5, buf6, buf7, buf8, 
buf9) class ContextgenCNNNew(nn.Module): def __init__(self, latent_dim=1024): super(ContextgenCNNNew, self).__init__() self.latent_dim = latent_dim self.conv1 = nn.Conv2d(self.latent_dim, self.latent_dim // 4, 1, stride=1, padding=0, bias=False) self.conv2 = nn.Conv2d(self.latent_dim // 4, self.latent_dim // 4, 2, stride=1, padding=1, bias=False) self.conv3 = nn.Conv2d(self.latent_dim // 4, self.latent_dim // 2, 2, stride=1, padding=0, bias=False) self.conv4 = nn.Conv2d(self.latent_dim // 2, self.latent_dim, 1, stride=1, padding=0, bias=False) def train_order_block_ids(self): return [[0, 3]] def forward(self, input_0): primals_1 = self.conv1.weight primals_3 = self.conv2.weight primals_4 = self.conv3.weight primals_5 = self.conv4.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
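The same kind of parity check as in the Net record applies here; all four convolutions are bias=False, so load_state_dict only has to move the four weight tensors (hypothetical sketch, CUDA assumed):

import torch

torch.manual_seed(0)
eager, compiled = ContextgenCNN().cuda(), ContextgenCNNNew().cuda()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 1024, 64, 64, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x), compiled(x), atol=1e-4)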
SarodYatawatta/federated-pytorch-test
ContextgenCNN
false
8765
[ "Apache-2.0" ]
33
42a51ba12a92b32fa19273340d5b61e74e11d8e0
https://github.com/SarodYatawatta/federated-pytorch-test/tree/42a51ba12a92b32fa19273340d5b61e74e11d8e0
G_Large
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/pl/cpl6jhqnafnew6pronlksao6s33bcslpzm3k66a74phcycboxpwr.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4096 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (1024*y1)), tmp0, xmask) ''', 
# kernel path: runs/run_shard_1/inductor_cache/eb/cebfmp3xsydvhof2vuiuzrtwr7fwapeufpm52glmntqds2lsygkv.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4096, 16], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 4096
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = (yindex // 64)
    tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/q4/cq46xznhsxlsfekongioaargva4mzxnrgtlpbrrnhugzm5zrigaf.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 8192
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = (yindex // 64)
    tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (64*x2) + (1024*y1)), tmp0, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/e7/ce732v5xfre7fgt2flmb3cuihzmpqik354xghqhh7fjyqsch5gun.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
#   x => convolution
#   x_1 => gt, mul, where
# Graph fragment:
#   %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {})
#   %where : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256, 4096], tile_hint=TileHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 256
    xnumel = 4096
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = (yindex // 64)
    tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + (y0 + (64*x2) + (262144*y1)), tmp4, ymask)
    tl.store(out_ptr1 + (y0 + (64*x2) + (262144*y1)), tmp7, ymask)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/zk/czkb2cmj6u3zundkdjn4nolh6qxj3z6wtcu5b6efbrao4q26z6tg.py
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
#   x_2 => convolution_1
#   x_3 => gt_1, mul_1, where_1
# Graph fragment:
#   %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {})
#   %where_1 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused_convolution_leaky_relu_4 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[262144],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 262144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + (x2), tmp4, None)
    tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
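
# --- Editor's sketch (not part of the Inductor output): the fused epilogue in
# the triton_poi_fused_convolution_leaky_relu_* kernels above. The convolution
# itself runs through an extern kernel; these kernels only add the bias and
# apply LeakyReLU(0.2), emitting both the activation and the boolean `gt` mask
# kept for backward. A hedged eager-mode equivalent (written against an NCHW
# view; the compiled code indexes a channels-last layout instead):
def _sketch_leaky_relu_epilogue(conv_out, bias):
    y = conv_out + bias.view(1, -1, 1, 1)        # broadcast bias over batch and space
    mask = y > 0                                 # the %gt node, stored as *i1
    return mask, torch.where(mask, y, 0.2 * y)   # %where == leaky_relu(y, 0.2)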
# kernel path: runs/run_shard_1/inductor_cache/zd/czd5znagt6tu6u452lxmj7nc43sgnnxpbrrchtlbbvveitiz6p63.py
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
#   x_4 => convolution_2
#   x_5 => gt_2, mul_2, where_2
# Graph fragment:
#   %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_6, %primals_7, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.2), kwargs = {})
#   %where_2 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
triton_poi_fused_convolution_leaky_relu_5 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 65536
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + (x2), tmp4, None)
    tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/ek/ceknezy6h4cudr6f3irjpgrhymry7rywljqf5mtykxrgvrl4jon5.py
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
#   x_6 => convolution_3
#   x_7 => gt_3, mul_3, where_3
# Graph fragment:
#   %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_8, %primals_9, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt_3 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
#   %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.2), kwargs = {})
#   %where_3 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {})
triton_poi_fused_convolution_leaky_relu_6 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + (x2), tmp4, None)
    tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/dn/cdnx2ufwslnnzer36n2n7psbtlrno5rqoundkx5uf2kpimyr2wtp.py
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
#   x_8 => convolution_4
#   x_9 => gt_4, mul_4, where_4
# Graph fragment:
#   %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_3, %primals_10, %primals_11, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt_4 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {})
#   %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.2), kwargs = {})
#   %where_4 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_4, %mul_4), kwargs = {})
triton_poi_fused_convolution_leaky_relu_7 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4096],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4096
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + (x2), tmp4, None)
    tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/sb/csbsiq3iyfwfec6lrf25x7bhycqnvevvoahmheb2dmjzihzrgcm5.py
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
#   x_10 => convolution_5
#   x_11 => gt_5, mul_5, where_5
# Graph fragment:
#   %convolution_5 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_4, %primals_12, %primals_13, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt_5 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_5, 0), kwargs = {})
#   %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_5, 0.2), kwargs = {})
#   %where_5 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %convolution_5, %mul_5), kwargs = {})
triton_poi_fused_convolution_leaky_relu_8 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1024],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + (x2), tmp4, xmask)
    tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
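
# --- Editor's note (not part of the Inductor output): masking convention. The
# larger kernels above use xmask = tl.full([XBLOCK], True, tl.int1), i.e. no
# bounds check, where Inductor has (apparently) proven the check redundant for
# every autotuning config; the small kernels (xnumel = 1024, 256) keep the real
# check `xindex < xnumel`. The guarded pattern, as a minimal hypothetical
# 1-D copy kernel body:
#
#     offs = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)
#     mask = offs < xnumel            # dropped when the bound is provably exact
#     tl.store(out_ptr + offs, tl.load(in_ptr + offs, mask), mask)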
# kernel path: runs/run_shard_1/inductor_cache/ku/ckuyldlzu3mnciuyyzgwqd2dmcwxhp5xrx7sborvm6cwuaiqguye.py
# Topologically Sorted Source Nodes: [x_12, x_13], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
#   x_12 => convolution_6
#   x_13 => gt_6, mul_6, where_6
# Graph fragment:
#   %convolution_6 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_5, %primals_14, %primals_15, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt_6 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_6, 0), kwargs = {})
#   %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_6, 0.2), kwargs = {})
#   %where_6 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %convolution_6, %mul_6), kwargs = {})
triton_poi_fused_convolution_leaky_relu_9 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + (x2), tmp4, xmask)
    tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/xa/cxaikaxqp3qjbf73hfwdlzk46k46uf3g6zp3ziawkbmoyb36dw74.py
# Topologically Sorted Source Nodes: [d], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   d => cat
# Graph fragment:
#   %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %where_6], 1), kwargs = {})
triton_poi_fused_cat_10 = async_compile.triton('triton_poi_fused_cat_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[512],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 128
    x1 = (xindex // 128)
    x2 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 64, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + ((64*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full([1], 0, tl.int32)
    tmp9 = triton_helpers.maximum(tmp8, tmp7)
    tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
    tmp11 = tl.where(tmp4, tmp9, tmp10)
    tmp12 = tmp0 >= tmp3
    tmp13 = tl.full([1], 128, tl.int64)
    tmp14 = tmp0 < tmp13
    tmp15 = tl.load(in_ptr2 + ((64*x1) + ((-64) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
    tmp16 = tl.where(tmp4, tmp11, tmp15)
    tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/y4/cy4y6wjsf4oyjjhgebntfgtqjtg3qy6oc2gnfg7q2nu6k7eqxzoq.py
# Topologically Sorted Source Nodes: [d_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   d_1 => cat_1
# Graph fragment:
#   %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_1, %where_5], 1), kwargs = {})
triton_poi_fused_cat_11 = async_compile.triton('triton_poi_fused_cat_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[2048],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 2048
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 128
    x1 = (xindex // 128)
    x2 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 64, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + ((64*x1) + x0), tmp4, eviction_policy='evict_last', other=0.0)
    tmp6 = tl.load(in_ptr1 + (x0), tmp4, eviction_policy='evict_last', other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full([1], 0, tl.int32)
    tmp9 = triton_helpers.maximum(tmp8, tmp7)
    tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
    tmp11 = tl.where(tmp4, tmp9, tmp10)
    tmp12 = tmp0 >= tmp3
    tmp13 = tl.full([1], 128, tl.int64)
    tmp14 = tmp0 < tmp13
    tmp15 = tl.load(in_ptr2 + ((64*x1) + ((-64) + x0)), tmp12, eviction_policy='evict_last', other=0.0)
    tmp16 = tl.where(tmp4, tmp11, tmp15)
    tl.store(out_ptr0 + (x2), tmp16, None)
''', device_str='cuda')
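
# --- Editor's sketch (not part of the Inductor output): the triton_poi_fused_cat_*
# kernels. Each one fuses "bias-add + ReLU on the decoder tensor, then channel
# concatenation with an encoder skip tensor" into a single pass over the
# 128-channel output (64 decoder + 64 skip channels). A hedged eager-mode
# equivalent, with illustrative names and layout details elided:
def _sketch_skip_cat(deconv_out, bias, skip):
    d = torch.relu(deconv_out + bias.view(1, -1, 1, 1))  # the %relu_k branch
    return torch.cat([d, skip], dim=1)                   # %cat_k: 64 + 64 channels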
# kernel path: runs/run_shard_1/inductor_cache/a7/ca7i4rvmtxvgdt3g3jyxge3k77hc6xywmryzmmvnvx6zrbphxgti.py
# Topologically Sorted Source Nodes: [d_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   d_2 => cat_2
# Graph fragment:
#   %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_2, %where_4], 1), kwargs = {})
triton_poi_fused_cat_12 = async_compile.triton('triton_poi_fused_cat_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[8192],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 8192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 128
    x1 = (xindex // 128)
    x2 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 64, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + ((64*x1) + x0), tmp4, eviction_policy='evict_last', other=0.0)
    tmp6 = tl.load(in_ptr1 + (x0), tmp4, eviction_policy='evict_last', other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full([1], 0, tl.int32)
    tmp9 = triton_helpers.maximum(tmp8, tmp7)
    tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
    tmp11 = tl.where(tmp4, tmp9, tmp10)
    tmp12 = tmp0 >= tmp3
    tmp13 = tl.full([1], 128, tl.int64)
    tmp14 = tmp0 < tmp13
    tmp15 = tl.load(in_ptr2 + ((64*x1) + ((-64) + x0)), tmp12, eviction_policy='evict_last', other=0.0)
    tmp16 = tl.where(tmp4, tmp11, tmp15)
    tl.store(out_ptr0 + (x2), tmp16, None)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/vh/cvh76qqe2azhxs75p7igqklwrgs35zngmoo5nc4hixq3upunfbvj.py
# Topologically Sorted Source Nodes: [d_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   d_3 => cat_3
# Graph fragment:
#   %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_3, %where_3], 1), kwargs = {})
triton_poi_fused_cat_13 = async_compile.triton('triton_poi_fused_cat_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[32768],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 32768
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 128
    x1 = (xindex // 128)
    x2 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 64, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + ((64*x1) + x0), tmp4, eviction_policy='evict_last', other=0.0)
    tmp6 = tl.load(in_ptr1 + (x0), tmp4, eviction_policy='evict_last', other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full([1], 0, tl.int32)
    tmp9 = triton_helpers.maximum(tmp8, tmp7)
    tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
    tmp11 = tl.where(tmp4, tmp9, tmp10)
    tmp12 = tmp0 >= tmp3
    tmp13 = tl.full([1], 128, tl.int64)
    tmp14 = tmp0 < tmp13
    tmp15 = tl.load(in_ptr2 + ((64*x1) + ((-64) + x0)), tmp12, eviction_policy='evict_last', other=0.0)
    tmp16 = tl.where(tmp4, tmp11, tmp15)
    tl.store(out_ptr0 + (x2), tmp16, None)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/qb/cqb25zzjfyjylpzcqb26wdws43n23ccefau7y5tdyvuldkztlpx3.py
# Topologically Sorted Source Nodes: [d_4], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   d_4 => cat_4
# Graph fragment:
#   %cat_4 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_4, %where_2], 1), kwargs = {})
triton_poi_fused_cat_14 = async_compile.triton('triton_poi_fused_cat_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 131072
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 128
    x1 = (xindex // 128)
    x2 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 64, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + ((64*x1) + x0), tmp4, eviction_policy='evict_last', other=0.0)
    tmp6 = tl.load(in_ptr1 + (x0), tmp4, eviction_policy='evict_last', other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full([1], 0, tl.int32)
    tmp9 = triton_helpers.maximum(tmp8, tmp7)
    tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
    tmp11 = tl.where(tmp4, tmp9, tmp10)
    tmp12 = tmp0 >= tmp3
    tmp13 = tl.full([1], 128, tl.int64)
    tmp14 = tmp0 < tmp13
    tmp15 = tl.load(in_ptr2 + ((64*x1) + ((-64) + x0)), tmp12, eviction_policy='evict_last', other=0.0)
    tmp16 = tl.where(tmp4, tmp11, tmp15)
    tl.store(out_ptr0 + (x2), tmp16, None)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/up/cupjkoubabwmtidpejhgamf4gzh7xz6muiyky2uf662ltmrwc47r.py
# Topologically Sorted Source Nodes: [d_5], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   d_5 => cat_5
# Graph fragment:
#   %cat_5 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_5, %where_1], 1), kwargs = {})
triton_poi_fused_cat_15 = async_compile.triton('triton_poi_fused_cat_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[524288],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_15(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 524288
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 128
    x1 = (xindex // 128)
    x2 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 64, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + ((64*x1) + x0), tmp4, eviction_policy='evict_last', other=0.0)
    tmp6 = tl.load(in_ptr1 + (x0), tmp4, eviction_policy='evict_last', other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full([1], 0, tl.int32)
    tmp9 = triton_helpers.maximum(tmp8, tmp7)
    tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
    tmp11 = tl.where(tmp4, tmp9, tmp10)
    tmp12 = tmp0 >= tmp3
    tmp13 = tl.full([1], 128, tl.int64)
    tmp14 = tmp0 < tmp13
    tmp15 = tl.load(in_ptr2 + ((64*x1) + ((-64) + x0)), tmp12, eviction_policy='evict_last', other=0.0)
    tmp16 = tl.where(tmp4, tmp11, tmp15)
    tl.store(out_ptr0 + (x2), tmp16, None)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/o6/co6id2qfxwm7btnemyq2qui6nmltu436xerhtylviaead5yj3s26.py
# Topologically Sorted Source Nodes: [d_6], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   d_6 => cat_6
# Graph fragment:
#   %cat_6 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_6, %where], 1), kwargs = {})
triton_poi_fused_cat_16 = async_compile.triton('triton_poi_fused_cat_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[2097152],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_16(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 2097152
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 128
    x1 = (xindex // 128)
    x2 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 64, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + ((64*x1) + x0), tmp4, eviction_policy='evict_last', other=0.0)
    tmp6 = tl.load(in_ptr1 + (x0), tmp4, eviction_policy='evict_last', other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full([1], 0, tl.int32)
    tmp9 = triton_helpers.maximum(tmp8, tmp7)
    tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
    tmp11 = tl.where(tmp4, tmp9, tmp10)
    tmp12 = tmp0 >= tmp3
    tmp13 = tl.full([1], 128, tl.int64)
    tmp14 = tmp0 < tmp13
    tmp15 = tl.load(in_ptr2 + ((64*x1) + ((-64) + x0)), tmp12, eviction_policy='evict_last', other=0.0)
    tmp16 = tl.where(tmp4, tmp11, tmp15)
    tl.store(out_ptr0 + (x2), tmp16, None)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/el/celgn2ifchgsbqr6dvotmq4cfrjpax3l2wr65cqghlkrrfigsus4.py
# Topologically Sorted Source Nodes: [x_33, x_34], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   x_33 => convolution_15
#   x_34 => relu_7
# Graph fragment:
#   %convolution_15 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_6, %primals_32, %primals_33, [2, 2], [2, 2], [1, 1], True, [0, 0], 1), kwargs = {})
#   %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_15,), kwargs = {})
#   %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_7, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_17 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_17', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_17(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 65536
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + (x0), None)
    tmp1 = tl.load(in_ptr0 + (0))
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tmp6 = 0.0
    tmp7 = tmp5 <= tmp6
    tl.store(in_out_ptr0 + (x0), tmp5, None)
    tl.store(out_ptr0 + (x0), tmp7, None)
''', device_str='cuda')
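
# --- Editor's sketch (not part of the Inductor output): the combined
# relu + threshold_backward pattern in kernel _17 above. It adds the bias in
# place, applies ReLU, and also stores the boolean mask (out <= 0) that
# autograd later uses to zero the incoming gradient. Hedged eager-mode sketch:
def _sketch_relu_with_backward_mask(y, bias):
    out = torch.relu(y + bias)   # forward value, written back to in_out_ptr0
    le_mask = out <= 0           # %le; backward zeroes grad wherever le_mask is True
    return out, le_mask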
# kernel path: runs/run_shard_1/inductor_cache/p4/cp4fxi7tpjv5it63mebbtmj2aksksuztpyzu52cgau3ytdaxpfui.py
# Topologically Sorted Source Nodes: [x_31, x_32], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   x_31 => convolution_14
#   x_32 => relu_6
# Graph fragment:
#   %convolution_14 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_5, %primals_30, %primals_31, [2, 2], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {})
#   %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_14,), kwargs = {})
#   %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_6, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_18 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1048576],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_18(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1048576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/vc/cvcdoneqdimz4dor6naslrphlonvdog467pi4yqmo6e3bqqqhgvv.py
# Topologically Sorted Source Nodes: [x_29, x_30], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   x_29 => convolution_13
#   x_30 => relu_5
# Graph fragment:
#   %convolution_13 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_4, %primals_28, %primals_29, [2, 2], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {})
#   %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_13,), kwargs = {})
#   %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_5, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_19 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[262144],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_19(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 262144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/zd/czdwpkeijxkdyvat3m2pijud3ifclthnovetpxop4jq63rubypg3.py
# Topologically Sorted Source Nodes: [x_27, x_28], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   x_27 => convolution_12
#   x_28 => relu_4
# Graph fragment:
#   %convolution_12 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_3, %primals_26, %primals_27, [2, 2], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {})
#   %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_12,), kwargs = {})
#   %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_20 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_20(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 65536
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/n4/cn46qzzwur7e6qvsdjkc7cvevmj4hayozvzjmhdba22v5yhhtlfa.py
# Topologically Sorted Source Nodes: [x_25, x_26], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   x_25 => convolution_11
#   x_26 => relu_3
# Graph fragment:
#   %convolution_11 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_24, %primals_25, [2, 2], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {})
#   %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_11,), kwargs = {})
#   %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_21 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_21(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')

# kernel path: runs/run_shard_1/inductor_cache/ih/cihljp53dfkeajfst7qfwatbwgujjsvm5jg764lb7bvhnawbemg2.py
# Topologically Sorted Source Nodes: [x_22, x_24], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   x_22 => convolution_10
#   x_24 => relu_2
# Graph fragment:
#   %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_22, %primals_23, [2, 2], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {})
#   %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_10,), kwargs = {})
#   %le_5 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_22 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4096],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_22(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4096
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
=> convolution_9 # x_21 => relu_1 # Graph fragment: # %convolution_9 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_20, %primals_21, [2, 2], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_9,), kwargs = {}) # %le_6 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_23 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_23', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_23(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/lx/clxlzq2vek7f5a5n375v62nmazsjoh7sozcf5jhuwjc6kfgdj4e5.py # Topologically Sorted Source Nodes: [x_16, x_18], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_16 => convolution_8 # x_18 => relu # Graph fragment: # %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_7, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_8,), kwargs = {}) # %le_7 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_24 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_24', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_24', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_24(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33 = args args.clear() assert_size_stride(primals_1, (64, 1, 6, 6), (36, 36, 6, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 1, 128, 128), (16384, 16384, 128, 1)) assert_size_stride(primals_4, (64, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (64, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (64, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_9, (64, ), (1, )) assert_size_stride(primals_10, (64, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_11, (64, ), (1, )) assert_size_stride(primals_12, (64, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_13, (64, ), (1, )) assert_size_stride(primals_14, (64, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_15, (64, ), (1, )) assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_17, (64, ), (1, )) assert_size_stride(primals_18, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_19, (64, ), (1, )) assert_size_stride(primals_20, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_21, (64, ), (1, )) 
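    # These assert_size_stride guards pin every parameter to the exact shape
    # and stride layout this graph was compiled against, so a mismatched
    # tensor fails fast here instead of reaching a kernel launch with the
    # wrong layout.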
assert_size_stride(primals_22, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_23, (64, ), (1, )) assert_size_stride(primals_24, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_25, (64, ), (1, )) assert_size_stride(primals_26, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_27, (64, ), (1, )) assert_size_stride(primals_28, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_29, (64, ), (1, )) assert_size_stride(primals_30, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_31, (64, ), (1, )) assert_size_stride(primals_32, (128, 1, 6, 6), (36, 36, 6, 1)) assert_size_stride(primals_33, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_4, buf0, 4096, 16, grid=grid(4096, 16), stream=stream0) del primals_4 buf1 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(primals_6, buf1, 4096, 16, grid=grid(4096, 16), stream=stream0) del primals_6 buf2 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(primals_8, buf2, 4096, 16, grid=grid(4096, 16), stream=stream0) del primals_8 buf3 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(primals_10, buf3, 4096, 16, grid=grid(4096, 16), stream=stream0) del primals_10 buf4 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(primals_12, buf4, 4096, 16, grid=grid(4096, 16), stream=stream0) del primals_12 buf5 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(primals_14, buf5, 4096, 16, grid=grid(4096, 16), stream=stream0) del primals_14 buf6 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_16, buf6, 4096, 9, grid=grid(4096, 9), stream=stream0) del primals_16 buf7 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_18, buf7, 4096, 9, grid=grid(4096, 9), stream=stream0) del primals_18 buf8 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_20, buf8, 8192, 16, grid=grid(8192, 16), stream=stream0) del primals_20 buf9 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_22, buf9, 8192, 16, grid=grid(8192, 16), stream=stream0) del primals_22 buf10 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_24, buf10, 8192, 16, grid=grid(8192, 16), stream=stream0) del primals_24 buf11 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_26, buf11, 8192, 16, grid=grid(8192, 16), stream=stream0) del primals_26 buf12 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # 
Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_28, buf12, 8192, 16, grid=grid(8192, 16), stream=stream0) del primals_28 buf13 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_30, buf13, 8192, 16, grid=grid(8192, 16), stream=stream0) del primals_30 # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf15 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.bool) buf16 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_3.run(buf14, primals_2, buf15, buf16, 256, 4096, grid=grid(256, 4096), stream=stream0) del buf14 del primals_2 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf17 = extern_kernels.convolution(buf16, buf0, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 64, 32, 32), (65536, 1, 2048, 64)) buf18 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.bool) buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_4.run(buf17, primals_5, buf18, buf19, 262144, grid=grid(262144), stream=stream0) del buf17 del primals_5 # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] buf20 = extern_kernels.convolution(buf19, buf1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 64, 16, 16), (16384, 1, 1024, 64)) buf21 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.bool) buf22 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.float32) # Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_5.run(buf20, primals_7, buf21, buf22, 65536, grid=grid(65536), stream=stream0) del buf20 del primals_7 # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] buf23 = extern_kernels.convolution(buf22, buf2, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 64, 8, 8), (4096, 1, 512, 64)) buf24 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch.bool) buf25 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch.float32) # Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_6.run(buf23, primals_9, buf24, buf25, 16384, grid=grid(16384), stream=stream0) del buf23 del primals_9 # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution] buf26 = extern_kernels.convolution(buf25, buf3, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 64, 4, 4), (1024, 1, 256, 64)) buf27 = empty_strided_cuda((4, 64, 4, 4), (1024, 1, 256, 
64), torch.bool) buf28 = empty_strided_cuda((4, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_7.run(buf26, primals_11, buf27, buf28, 4096, grid=grid(4096), stream=stream0) del buf26 del primals_11 # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution] buf29 = extern_kernels.convolution(buf28, buf4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf29, (4, 64, 2, 2), (256, 1, 128, 64)) buf30 = empty_strided_cuda((4, 64, 2, 2), (256, 1, 128, 64), torch.bool) buf31 = empty_strided_cuda((4, 64, 2, 2), (256, 1, 128, 64), torch.float32) # Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_8.run(buf29, primals_13, buf30, buf31, 1024, grid=grid(1024), stream=stream0) del buf29 del primals_13 # Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution] buf32 = extern_kernels.convolution(buf31, buf5, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 64, 1, 1), (64, 1, 64, 64)) buf33 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 64, 64), torch.bool) buf34 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 64, 64), torch.float32) # Topologically Sorted Source Nodes: [x_12, x_13], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_9.run(buf32, primals_15, buf33, buf34, 256, grid=grid(256), stream=stream0) del primals_15 # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution] buf35 = extern_kernels.convolution(buf34, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 64, 1, 1), (64, 1, 64, 64)) buf36 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 64, 64), torch.bool) buf37 = buf32; del buf32 # reuse # Topologically Sorted Source Nodes: [x_14, x_15], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_9.run(buf35, primals_17, buf36, buf37, 256, grid=grid(256), stream=stream0) del buf35 del primals_17 # Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution] buf38 = extern_kernels.convolution(buf37, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 64, 1, 1), (64, 1, 64, 64)) buf39 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [d], Original ATen: [aten.cat] triton_poi_fused_cat_10.run(buf38, primals_19, buf34, buf39, 512, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.convolution] buf40 = extern_kernels.convolution(buf39, buf8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 64, 2, 2), (256, 1, 128, 64)) buf41 = empty_strided_cuda((4, 128, 2, 2), (512, 1, 256, 128), torch.float32) # Topologically Sorted Source Nodes: [d_1], Original ATen: [aten.cat] triton_poi_fused_cat_11.run(buf40, primals_21, buf31, buf41, 2048, grid=grid(2048), stream=stream0) # Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.convolution] buf42 = extern_kernels.convolution(buf41, buf9, 
stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 64, 4, 4), (1024, 1, 256, 64)) buf43 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128), torch.float32) # Topologically Sorted Source Nodes: [d_2], Original ATen: [aten.cat] triton_poi_fused_cat_12.run(buf42, primals_23, buf28, buf43, 8192, grid=grid(8192), stream=stream0) # Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.convolution] buf44 = extern_kernels.convolution(buf43, buf10, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 64, 8, 8), (4096, 1, 512, 64)) buf45 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128), torch.float32) # Topologically Sorted Source Nodes: [d_3], Original ATen: [aten.cat] triton_poi_fused_cat_13.run(buf44, primals_25, buf25, buf45, 32768, grid=grid(32768), stream=stream0) # Topologically Sorted Source Nodes: [x_27], Original ATen: [aten.convolution] buf46 = extern_kernels.convolution(buf45, buf11, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf46, (4, 64, 16, 16), (16384, 1, 1024, 64)) buf47 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32) # Topologically Sorted Source Nodes: [d_4], Original ATen: [aten.cat] triton_poi_fused_cat_14.run(buf46, primals_27, buf22, buf47, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [x_29], Original ATen: [aten.convolution] buf48 = extern_kernels.convolution(buf47, buf12, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 64, 32, 32), (65536, 1, 2048, 64)) buf49 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128), torch.float32) # Topologically Sorted Source Nodes: [d_5], Original ATen: [aten.cat] triton_poi_fused_cat_15.run(buf48, primals_29, buf19, buf49, 524288, grid=grid(524288), stream=stream0) # Topologically Sorted Source Nodes: [x_31], Original ATen: [aten.convolution] buf50 = extern_kernels.convolution(buf49, buf13, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf50, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf51 = empty_strided_cuda((4, 128, 64, 64), (524288, 1, 8192, 128), torch.float32) # Topologically Sorted Source Nodes: [d_6], Original ATen: [aten.cat] triton_poi_fused_cat_16.run(buf50, primals_31, buf16, buf51, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [x_33], Original ATen: [aten.convolution] buf52 = extern_kernels.convolution(buf51, primals_32, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf52, (4, 1, 128, 128), (16384, 1, 128, 1)) buf53 = reinterpret_tensor(buf52, (4, 1, 128, 128), (16384, 16384, 128, 1), 0); del buf52 # reuse buf54 = empty_strided_cuda((4, 1, 128, 128), (16384, 1, 128, 1), torch.bool) # Topologically Sorted Source Nodes: [x_33, x_34], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_17.run(buf53, primals_33, buf54, 65536, grid=grid(65536), stream=stream0) del primals_33 buf55 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.bool) # Topologically Sorted Source Nodes: [x_31, x_32], Original ATen: 
[aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_18.run(buf50, primals_31, buf55, 1048576, grid=grid(1048576), stream=stream0) del buf50 del primals_31 buf56 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.bool) # Topologically Sorted Source Nodes: [x_29, x_30], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_19.run(buf48, primals_29, buf56, 262144, grid=grid(262144), stream=stream0) del buf48 del primals_29 buf57 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.bool) # Topologically Sorted Source Nodes: [x_27, x_28], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_20.run(buf46, primals_27, buf57, 65536, grid=grid(65536), stream=stream0) del buf46 del primals_27 buf58 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch.bool) # Topologically Sorted Source Nodes: [x_25, x_26], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_21.run(buf44, primals_25, buf58, 16384, grid=grid(16384), stream=stream0) del buf44 del primals_25 buf59 = empty_strided_cuda((4, 64, 4, 4), (1024, 1, 256, 64), torch.bool) # Topologically Sorted Source Nodes: [x_22, x_24], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_22.run(buf42, primals_23, buf59, 4096, grid=grid(4096), stream=stream0) del buf42 del primals_23 buf60 = empty_strided_cuda((4, 64, 2, 2), (256, 1, 128, 64), torch.bool) # Topologically Sorted Source Nodes: [x_19, x_21], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_23.run(buf40, primals_21, buf60, 1024, grid=grid(1024), stream=stream0) del buf40 del primals_21 buf61 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 64, 64), torch.bool) # Topologically Sorted Source Nodes: [x_16, x_18], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_24.run(buf38, primals_19, buf61, 256, grid=grid(256), stream=stream0) del buf38 del primals_19 return (buf53, primals_1, primals_3, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, buf12, buf13, primals_32, buf15, buf16, buf18, buf19, buf21, buf22, buf24, buf25, buf27, buf28, buf30, buf31, buf33, buf34, buf36, buf37, buf39, buf41, buf43, buf45, buf47, buf49, buf51, buf54, buf55, buf56, buf57, buf58, buf59, buf60, buf61, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 1, 6, 6), (36, 36, 6, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 128, 128), (16384, 16384, 128, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((64, ), 
(1, ), device='cuda:0', dtype=torch.float32)
    primals_10 = rand_strided((64, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_11 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_12 = rand_strided((64, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_13 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_14 = rand_strided((64, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_15 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_16 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_17 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_18 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_19 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_20 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_21 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_22 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_23 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_24 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_25 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_26 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_27 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_28 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_29 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_30 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_31 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_32 = rand_strided((128, 1, 6, 6), (36, 36, 6, 1), device='cuda:0', dtype=torch.float32)
    primals_33 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4,
        primals_5, primals_6, primals_7, primals_8, primals_9, primals_10,
        primals_11, primals_12, primals_13, primals_14, primals_15,
        primals_16, primals_17, primals_18, primals_19, primals_20,
        primals_21, primals_22, primals_23, primals_24, primals_25,
        primals_26, primals_27, primals_28, primals_29, primals_30,
        primals_31, primals_32, primals_33])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
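

# A minimal eager-mode sketch (illustrative only; `relu_with_backward_mask`
# is not part of the generated module above) of what the repeated
# *_relu_threshold_backward_* kernels compute per element: the biased ReLU
# output and the boolean (y <= 0) mask that ReLU's backward uses to zero
# incoming gradients.
import torch


def relu_with_backward_mask(conv_out, bias):
    # conv_out: (N, C, H, W) convolution output; bias: (C,) per-channel bias.
    y = torch.relu(conv_out + bias.view(1, -1, 1, 1))
    mask = y <= 0  # torch.bool, mirroring the buf54..buf61 mask buffers
    return y, mask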
import torch
import torch.nn as nn


class Conv2d(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
            bn=False, activation='leakyrelu', dropout=False):
        super(Conv2d, self).__init__()
        # For the even kernels used below (4 and 6) this padding pairs with
        # stride 2 to halve the spatial resolution exactly.
        padding = int((kernel_size - 1) / 2)
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
            stride, padding=padding)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0,
            affine=True) if bn else None
        self.dropout = nn.Dropout(p=0.5) if dropout else None
        if activation == 'leakyrelu':
            self.activation = nn.LeakyReLU(negative_slope=0.2)
        elif activation == 'relu':
            self.activation = nn.ReLU()
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        else:
            raise ValueError('Not a valid activation, received {}'.format(
                activation))

    def forward(self, x):
        x = self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.dropout is not None:
            x = self.dropout(x)
        x = self.activation(x)
        return x


class Deconv2d(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
            bn=False, activation='leakyrelu', dropout=False):
        super(Deconv2d, self).__init__()
        padding = int((kernel_size - 1) / 2)
        self.conv = nn.ConvTranspose2d(in_channels, out_channels,
            kernel_size, stride, padding=padding)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0,
            affine=True) if bn else None
        self.dropout = nn.Dropout(p=0.5) if dropout else None
        if activation == 'leakyrelu':
            self.activation = nn.LeakyReLU(negative_slope=0.2)
        elif activation == 'relu':
            self.activation = nn.ReLU()
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        else:
            raise ValueError('Not a valid activation, received {}'.format(
                activation))

    def forward(self, x):
        x = self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.dropout is not None:
            x = self.dropout(x)
        x = self.activation(x)
        return x


class G_Large(nn.Module):
    # U-Net-style generator: seven stride-2 encoder stages plus a stride-1
    # bottleneck, mirrored by decoders that consume channel-wise skip
    # concatenations (hence the 128 input channels on decoder_2..decoder_8).

    def __init__(self):
        super(G_Large, self).__init__()
        self.encoder_1 = Conv2d(1, 64, 6, stride=2, bn=False, activation='leakyrelu', dropout=False)
        self.encoder_2 = Conv2d(64, 64, 4, stride=2, bn=False, activation='leakyrelu', dropout=False)
        self.encoder_3 = Conv2d(64, 64, 4, stride=2, bn=False, activation='leakyrelu', dropout=False)
        self.encoder_4 = Conv2d(64, 64, 4, stride=2, bn=False, activation='leakyrelu', dropout=False)
        self.encoder_5 = Conv2d(64, 64, 4, stride=2, bn=False, activation='leakyrelu', dropout=False)
        self.encoder_6 = Conv2d(64, 64, 4, stride=2, bn=False, activation='leakyrelu', dropout=False)
        self.encoder_7 = Conv2d(64, 64, 4, stride=2, bn=False, activation='leakyrelu', dropout=False)
        self.encoder_8 = Conv2d(64, 64, 3, stride=1, bn=False, activation='leakyrelu', dropout=False)
        self.decoder_1 = Deconv2d(64, 64, 3, stride=1, bn=False, activation='relu', dropout=True)
        self.decoder_2 = Deconv2d(128, 64, 4, stride=2, bn=False, activation='relu', dropout=True)
        self.decoder_3 = Deconv2d(128, 64, 4, stride=2, bn=False, activation='relu', dropout=True)
        self.decoder_4 = Deconv2d(128, 64, 4, stride=2, bn=False, activation='relu', dropout=False)
        self.decoder_5 = Deconv2d(128, 64, 4, stride=2, bn=False, activation='relu', dropout=False)
        self.decoder_6 = Deconv2d(128, 64, 4, stride=2, bn=False, activation='relu', dropout=False)
        self.decoder_7 = Deconv2d(128, 64, 4, stride=2, bn=False, activation='relu', dropout=False)
        self.decoder_8 = Deconv2d(128, 1, 6, stride=2, bn=False, activation='relu', dropout=False)

    def forward(self, x):
        e1 = self.encoder_1(x)
        e2 = self.encoder_2(e1)
        e3 = self.encoder_3(e2)
        e4 = self.encoder_4(e3)
        e5 = self.encoder_5(e4)
        e6 = self.encoder_6(e5)
        e7 = self.encoder_7(e6)
        e8 = self.encoder_8(e7)
        # Decoder with skip connections: concatenate each upsampled map with
        # the encoder feature of the same resolution before the next stage.
        d = self.decoder_1(e8)
        d = torch.cat((d, e7), dim=1)
        d = self.decoder_2(d)
        d = torch.cat((d, e6), dim=1)
        d = self.decoder_3(d)
        d = torch.cat((d, e5), dim=1)
        d = self.decoder_4(d)
        d = torch.cat((d, e4), dim=1)
        d = self.decoder_5(d)
        d = torch.cat((d, e3), dim=1)
        d = self.decoder_6(d)
        d = torch.cat((d, e2), dim=1)
        d = self.decoder_7(d)
        d = torch.cat((d, e1), dim=1)
        d = self.decoder_8(d)
        return d


def get_inputs():
    return [torch.rand([4, 1, 128, 128])]


def get_init_inputs():
    return [[], {}]
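

# A short usage sketch (added for illustration, not part of the original
# source): build G_Large and push the sample input from get_inputs() through
# it. Seven stride-2 encoder stages take the 128x128 map down to 1x1 and the
# seven stride-2 decoders bring it back, so the output shape matches the
# input shape.
def demo_g_large():
    model = G_Large()
    x, = get_inputs()
    with torch.no_grad():
        out = model(x)
    assert out.shape == (4, 1, 128, 128)
    return out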
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (y0 + 64 * x2 + 262144 * y1), tmp4, ymask) tl.store(out_ptr1 + (y0 + 64 * x2 + 262144 * y1), tmp7, ymask) @triton.jit def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, 
None) tl.store(out_ptr1 + x2, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, None) tl.store(out_ptr1 + x2, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, None) tl.store(out_ptr1 + x2, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, None) tl.store(out_ptr1 + x2, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_cat_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 
>= tmp3 tl.full([1], 128, tl.int64) tmp15 = tl.load(in_ptr2 + (64 * x1 + (-64 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_cat_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp15 = tl.load(in_ptr2 + (64 * x1 + (-64 + x0)), tmp12, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, None) @triton.jit def triton_poi_fused_cat_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp15 = tl.load(in_ptr2 + (64 * x1 + (-64 + x0)), tmp12, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, None) @triton.jit def triton_poi_fused_cat_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp15 = tl.load(in_ptr2 + (64 * x1 + (-64 + x0)), tmp12, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, None) @triton.jit def triton_poi_fused_cat_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 
= triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp15 = tl.load(in_ptr2 + (64 * x1 + (-64 + x0)), tmp12, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, None) @triton.jit def triton_poi_fused_cat_15(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp15 = tl.load(in_ptr2 + (64 * x1 + (-64 + x0)), tmp12, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, None) @triton.jit def triton_poi_fused_cat_16(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp15 = tl.load(in_ptr2 + (64 * x1 + (-64 + x0)), tmp12, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_17(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = 0.0 tmp7 = tmp5 <= tmp6 tl.store(in_out_ptr0 + x0, tmp5, None) tl.store(out_ptr0 + x0, tmp7, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_18(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_19(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = 
tl.load(in_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x2, tmp6, None)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_20(in_ptr0,
        in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x2, tmp6, None)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_21(in_ptr0,
        in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x2, tmp6, None)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_22(in_ptr0,
        in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x2, tmp6, None)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_23(in_ptr0,
        in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_24(in_ptr0,
        in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13, primals_14, primals_15, primals_16,
        primals_17, primals_18, primals_19, primals_20, primals_21,
        primals_22, primals_23, primals_24, primals_25, primals_26,
        primals_27, primals_28, primals_29, primals_30, primals_31,
        primals_32, primals_33) = args
    args.clear()
    assert_size_stride(primals_1, (64, 1, 6, 6), (36, 36, 6, 1))
    assert_size_stride(primals_2, (64,), (1,))
    assert_size_stride(primals_3, (4, 1, 128, 128), (16384, 16384, 128, 1))
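    # Parameter guards mirror G_Large's layout: a 6x6 input stem
    # (primals_1), 4x4 stride-2 encoder weights, 3x3 bottleneck weights, and
    # 128-input-channel decoder weights, since every decoder after the first
    # consumes a skip concatenation.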
    assert_size_stride(primals_4, (64, 64, 4, 4), (1024, 16, 4, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (64, 64, 4, 4), (1024, 16, 4, 1))
    assert_size_stride(primals_7, (64,), (1,))
    assert_size_stride(primals_8, (64, 64, 4, 4), (1024, 16, 4, 1))
    assert_size_stride(primals_9, (64,), (1,))
    assert_size_stride(primals_10, (64, 64, 4, 4), (1024, 16, 4, 1))
    assert_size_stride(primals_11, (64,), (1,))
    assert_size_stride(primals_12, (64, 64, 4, 4), (1024, 16, 4, 1))
    assert_size_stride(primals_13, (64,), (1,))
    assert_size_stride(primals_14, (64, 64, 4, 4), (1024, 16, 4, 1))
    assert_size_stride(primals_15, (64,), (1,))
    assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_17, (64,), (1,))
    assert_size_stride(primals_18, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_19, (64,), (1,))
    assert_size_stride(primals_20, (128, 64, 4, 4), (1024, 16, 4, 1))
    assert_size_stride(primals_21, (64,), (1,))
    assert_size_stride(primals_22, (128, 64, 4, 4), (1024, 16, 4, 1))
    assert_size_stride(primals_23, (64,), (1,))
    assert_size_stride(primals_24, (128, 64, 4, 4), (1024, 16, 4, 1))
    assert_size_stride(primals_25, (64,), (1,))
    assert_size_stride(primals_26, (128, 64, 4, 4), (1024, 16, 4, 1))
    assert_size_stride(primals_27, (64,), (1,))
    assert_size_stride(primals_28, (128, 64, 4, 4), (1024, 16, 4, 1))
    assert_size_stride(primals_29, (64,), (1,))
    assert_size_stride(primals_30, (128, 64, 4, 4), (1024, 16, 4, 1))
    assert_size_stride(primals_31, (64,), (1,))
    assert_size_stride(primals_32, (128, 1, 6, 6), (36, 36, 6, 1))
    assert_size_stride(primals_33, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(4096, 16)](primals_4, buf0, 4096, 16,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_4
        buf1 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64),
            torch.float32)
        triton_poi_fused_0[grid(4096, 16)](primals_6, buf1, 4096, 16,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_6
        buf2 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64),
            torch.float32)
        triton_poi_fused_0[grid(4096, 16)](primals_8, buf2, 4096, 16,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_8
        buf3 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64),
            torch.float32)
        triton_poi_fused_0[grid(4096, 16)](primals_10, buf3, 4096, 16,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_10
        buf4 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64),
            torch.float32)
        triton_poi_fused_0[grid(4096, 16)](primals_12, buf4, 4096, 16,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_12
        buf5 = empty_strided_cuda((64, 64, 4, 4), (1024, 1, 256, 64),
            torch.float32)
        triton_poi_fused_0[grid(4096, 16)](primals_14, buf5, 4096, 16,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_14
        buf6 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64),
            torch.float32)
        triton_poi_fused_1[grid(4096, 9)](primals_16, buf6, 4096, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_16
        buf7 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.
            float32)
        triton_poi_fused_1[grid(4096, 9)](primals_18, buf7, 4096, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_18
        buf8 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64),
            torch.float32)
        triton_poi_fused_2[grid(8192, 16)](primals_20, buf8, 8192, 16,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_20
        buf9 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64),
            torch.float32)
        triton_poi_fused_2[grid(8192, 16)](primals_22, buf9, 8192, 16,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_22
        buf10 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64),
            torch.float32)
        triton_poi_fused_2[grid(8192, 16)](primals_24, buf10, 8192, 16,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_24
        buf11 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64),
            torch.float32)
        triton_poi_fused_2[grid(8192, 16)](primals_26, buf11, 8192, 16,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_26
        buf12 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64),
            torch.float32)
        triton_poi_fused_2[grid(8192, 16)](primals_28, buf12, 8192, 16,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_28
        buf13 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64),
            torch.float32)
        triton_poi_fused_2[grid(8192, 16)](primals_30, buf13, 8192, 16,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_30
        buf14 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
            2), padding=(2, 2), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf14, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf15 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64),
            torch.bool)
        buf16 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64),
            torch.float32)
        triton_poi_fused_convolution_leaky_relu_3[grid(256, 4096)](buf14,
            primals_2, buf15, buf16, 256, 4096, XBLOCK=4, YBLOCK=256,
            num_warps=4, num_stages=1)
        del buf14
        del primals_2
        buf17 = extern_kernels.convolution(buf16, buf0, stride=(2, 2),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf17, (4, 64, 32, 32), (65536, 1, 2048, 64))
        buf18 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
            torch.bool)
        buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64),
            torch.float32)
        triton_poi_fused_convolution_leaky_relu_4[grid(262144)](buf17,
            primals_5, buf18, buf19, 262144, XBLOCK=1024, num_warps=4,
            num_stages=1)
        del buf17
        del primals_5
        buf20 = extern_kernels.convolution(buf19, buf1, stride=(2, 2),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf20, (4, 64, 16, 16), (16384, 1, 1024, 64))
        buf21 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64),
            torch.bool)
        buf22 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64),
            torch.float32)
        triton_poi_fused_convolution_leaky_relu_5[grid(65536)](buf20,
            primals_7, buf21, buf22, 65536, XBLOCK=512, num_warps=4,
            num_stages=1)
        del buf20
        del primals_7
        buf23 = extern_kernels.convolution(buf22, buf2, stride=(2, 2),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf23, (4, 64, 8, 8), (4096, 1, 512, 64))
        buf24 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64),
            torch.bool)
        buf25 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64),
            torch.float32)
        triton_poi_fused_convolution_leaky_relu_6[grid(16384)](buf23,
            primals_9, buf24, buf25, 16384, XBLOCK=256, num_warps=4,
            num_stages=1)
        del buf23
        del primals_9
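        # Encoder chain: each stage is an extern stride-2 convolution whose
        # bias add and LeakyReLU(0.2) are fused into one Triton kernel that
        # also records the (x > 0) mask for the backward pass.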
buf26 = extern_kernels.convolution(buf25, buf3, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 64, 4, 4), (1024, 1, 256, 64)) buf27 = empty_strided_cuda((4, 64, 4, 4), (1024, 1, 256, 64), torch .bool) buf28 = empty_strided_cuda((4, 64, 4, 4), (1024, 1, 256, 64), torch .float32) triton_poi_fused_convolution_leaky_relu_7[grid(4096)](buf26, primals_11, buf27, buf28, 4096, XBLOCK=128, num_warps=4, num_stages=1) del buf26 del primals_11 buf29 = extern_kernels.convolution(buf28, buf4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf29, (4, 64, 2, 2), (256, 1, 128, 64)) buf30 = empty_strided_cuda((4, 64, 2, 2), (256, 1, 128, 64), torch.bool ) buf31 = empty_strided_cuda((4, 64, 2, 2), (256, 1, 128, 64), torch. float32) triton_poi_fused_convolution_leaky_relu_8[grid(1024)](buf29, primals_13, buf30, buf31, 1024, XBLOCK=128, num_warps=4, num_stages=1) del buf29 del primals_13 buf32 = extern_kernels.convolution(buf31, buf5, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 64, 1, 1), (64, 1, 64, 64)) buf33 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 64, 64), torch.bool) buf34 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 64, 64), torch. float32) triton_poi_fused_convolution_leaky_relu_9[grid(256)](buf32, primals_15, buf33, buf34, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_15 buf35 = extern_kernels.convolution(buf34, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 64, 1, 1), (64, 1, 64, 64)) buf36 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 64, 64), torch.bool) buf37 = buf32 del buf32 triton_poi_fused_convolution_leaky_relu_9[grid(256)](buf35, primals_17, buf36, buf37, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf35 del primals_17 buf38 = extern_kernels.convolution(buf37, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 64, 1, 1), (64, 1, 64, 64)) buf39 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 128, 128), torch.float32) triton_poi_fused_cat_10[grid(512)](buf38, primals_19, buf34, buf39, 512, XBLOCK=256, num_warps=4, num_stages=1) buf40 = extern_kernels.convolution(buf39, buf8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 64, 2, 2), (256, 1, 128, 64)) buf41 = empty_strided_cuda((4, 128, 2, 2), (512, 1, 256, 128), torch.float32) triton_poi_fused_cat_11[grid(2048)](buf40, primals_21, buf31, buf41, 2048, XBLOCK=256, num_warps=4, num_stages=1) buf42 = extern_kernels.convolution(buf41, buf9, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 64, 4, 4), (1024, 1, 256, 64)) buf43 = empty_strided_cuda((4, 128, 4, 4), (2048, 1, 512, 128), torch.float32) triton_poi_fused_cat_12[grid(8192)](buf42, primals_23, buf28, buf43, 8192, XBLOCK=256, num_warps=4, num_stages=1) buf44 = extern_kernels.convolution(buf43, buf10, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 64, 8, 8), (4096, 1, 512, 64)) buf45 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128), torch.float32) 
triton_poi_fused_cat_13[grid(32768)](buf44, primals_25, buf25, buf45, 32768, XBLOCK=128, num_warps=4, num_stages=1) buf46 = extern_kernels.convolution(buf45, buf11, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf46, (4, 64, 16, 16), (16384, 1, 1024, 64)) buf47 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32) triton_poi_fused_cat_14[grid(131072)](buf46, primals_27, buf22, buf47, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf48 = extern_kernels.convolution(buf47, buf12, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 64, 32, 32), (65536, 1, 2048, 64)) buf49 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128), torch.float32) triton_poi_fused_cat_15[grid(524288)](buf48, primals_29, buf19, buf49, 524288, XBLOCK=512, num_warps=8, num_stages=1) buf50 = extern_kernels.convolution(buf49, buf13, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf50, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf51 = empty_strided_cuda((4, 128, 64, 64), (524288, 1, 8192, 128), torch.float32) triton_poi_fused_cat_16[grid(2097152)](buf50, primals_31, buf16, buf51, 2097152, XBLOCK=1024, num_warps=4, num_stages=1) buf52 = extern_kernels.convolution(buf51, primals_32, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf52, (4, 1, 128, 128), (16384, 1, 128, 1)) buf53 = reinterpret_tensor(buf52, (4, 1, 128, 128), (16384, 16384, 128, 1), 0) del buf52 buf54 = empty_strided_cuda((4, 1, 128, 128), (16384, 1, 128, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_17[grid(65536)]( buf53, primals_33, buf54, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_33 buf55 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_18[grid(1048576)]( buf50, primals_31, buf55, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del buf50 del primals_31 buf56 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_19[grid(262144)]( buf48, primals_29, buf56, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del buf48 del primals_29 buf57 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_20[grid(65536)]( buf46, primals_27, buf57, 65536, XBLOCK=512, num_warps=4, num_stages=1) del buf46 del primals_27 buf58 = empty_strided_cuda((4, 64, 8, 8), (4096, 1, 512, 64), torch .bool) triton_poi_fused_convolution_relu_threshold_backward_21[grid(16384)]( buf44, primals_25, buf58, 16384, XBLOCK=256, num_warps=4, num_stages=1) del buf44 del primals_25 buf59 = empty_strided_cuda((4, 64, 4, 4), (1024, 1, 256, 64), torch .bool) triton_poi_fused_convolution_relu_threshold_backward_22[grid(4096)]( buf42, primals_23, buf59, 4096, XBLOCK=256, num_warps=4, num_stages=1) del buf42 del primals_23 buf60 = empty_strided_cuda((4, 64, 2, 2), (256, 1, 128, 64), torch.bool ) triton_poi_fused_convolution_relu_threshold_backward_23[grid(1024)]( buf40, primals_21, buf60, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf40 del primals_21 buf61 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 64, 64), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_24[grid(256)]( buf38, 
primals_19, buf61, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf38 del primals_19 return (buf53, primals_1, primals_3, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, buf12, buf13, primals_32, buf15, buf16, buf18, buf19, buf21, buf22, buf24, buf25, buf27, buf28, buf30, buf31, buf33, buf34, buf36, buf37, buf39, buf41, buf43, buf45, buf47, buf49, buf51, buf54, buf55, buf56, buf57, buf58, buf59, buf60, buf61) class Conv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, bn =False, activation='leakyrelu', dropout=False): super(Conv2d, self).__init__() padding = int((kernel_size - 1) / 2) self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding) self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0, affine=True) if bn else None self.dropout = nn.Dropout(p=0.5) if dropout else None if activation == 'leakyrelu': self.activation = nn.LeakyReLU(negative_slope=0.2) elif activation == 'relu': self.activation = nn.ReLU() elif activation == 'tanh': self.activation = nn.Tanh() else: raise ValueError('Not a valid activation, received {}'.format( activation)) def forward(self, x): x = self.conv(x) if self.bn is not None: x = self.bn(x) if self.dropout is not None: x = self.dropout(x) x = self.activation(x) return x class Deconv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, bn =False, activation='leakyrelu', dropout=False): super(Deconv2d, self).__init__() padding = int((kernel_size - 1) / 2) self.conv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding=padding) self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0, affine=True) if bn else None self.dropout = nn.Dropout(p=0.5) if dropout else None if activation == 'leakyrelu': self.activation = nn.LeakyReLU(negative_slope=0.2) elif activation == 'relu': self.activation = nn.ReLU() elif activation == 'tanh': self.activation = nn.Tanh() else: raise ValueError('Not a valid activation, received {}'.format( activation)) def forward(self, x): x = self.conv(x) if self.bn is not None: x = self.bn(x) if self.dropout is not None: x = self.dropout(x) x = self.activation(x) return x class G_LargeNew(nn.Module): def __init__(self): super(G_LargeNew, self).__init__() self.encoder_1 = Conv2d(1, 64, 6, stride=2, bn=False, activation= 'leakyrelu', dropout=False) self.encoder_2 = Conv2d(64, 64, 4, stride=2, bn=False, activation= 'leakyrelu', dropout=False) self.encoder_3 = Conv2d(64, 64, 4, stride=2, bn=False, activation= 'leakyrelu', dropout=False) self.encoder_4 = Conv2d(64, 64, 4, stride=2, bn=False, activation= 'leakyrelu', dropout=False) self.encoder_5 = Conv2d(64, 64, 4, stride=2, bn=False, activation= 'leakyrelu', dropout=False) self.encoder_6 = Conv2d(64, 64, 4, stride=2, bn=False, activation= 'leakyrelu', dropout=False) self.encoder_7 = Conv2d(64, 64, 4, stride=2, bn=False, activation= 'leakyrelu', dropout=False) self.encoder_8 = Conv2d(64, 64, 3, stride=1, bn=False, activation= 'leakyrelu', dropout=False) self.decoder_1 = Deconv2d(64, 64, 3, stride=1, bn=False, activation ='relu', dropout=True) self.decoder_2 = Deconv2d(128, 64, 4, stride=2, bn=False, activation='relu', dropout=True) self.decoder_3 = Deconv2d(128, 64, 4, stride=2, bn=False, activation='relu', dropout=True) self.decoder_4 = Deconv2d(128, 64, 4, stride=2, bn=False, activation='relu', dropout=False) self.decoder_5 = Deconv2d(128, 64, 4, stride=2, bn=False, activation='relu', dropout=False) self.decoder_6 = Deconv2d(128, 64, 4, 
stride=2, bn=False, activation='relu', dropout=False) self.decoder_7 = Deconv2d(128, 64, 4, stride=2, bn=False, activation='relu', dropout=False) self.decoder_8 = Deconv2d(128, 1, 6, stride=2, bn=False, activation ='relu', dropout=False) def forward(self, input_0): primals_1 = self.encoder_1.conv.weight primals_2 = self.encoder_1.conv.bias primals_4 = self.encoder_2.conv.weight primals_5 = self.encoder_2.conv.bias primals_6 = self.encoder_3.conv.weight primals_7 = self.encoder_3.conv.bias primals_8 = self.encoder_4.conv.weight primals_9 = self.encoder_4.conv.bias primals_10 = self.encoder_5.conv.weight primals_11 = self.encoder_5.conv.bias primals_12 = self.encoder_6.conv.weight primals_13 = self.encoder_6.conv.bias primals_14 = self.encoder_7.conv.weight primals_15 = self.encoder_7.conv.bias primals_16 = self.encoder_8.conv.weight primals_17 = self.encoder_8.conv.bias primals_18 = self.decoder_1.conv.weight primals_19 = self.decoder_1.conv.bias primals_20 = self.decoder_2.conv.weight primals_21 = self.decoder_2.conv.bias primals_22 = self.decoder_3.conv.weight primals_23 = self.decoder_3.conv.bias primals_24 = self.decoder_4.conv.weight primals_25 = self.decoder_4.conv.bias primals_26 = self.decoder_5.conv.weight primals_27 = self.decoder_5.conv.bias primals_28 = self.decoder_6.conv.weight primals_29 = self.decoder_6.conv.bias primals_30 = self.decoder_7.conv.weight primals_31 = self.decoder_7.conv.bias primals_32 = self.decoder_8.conv.weight primals_33 = self.decoder_8.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33]) return output[0]
RQuispeC/pytorch-ACSCP
G_Large
false
8766
[ "MIT" ]
25
c83f08632012c2245250ff9c5140814461db575c
https://github.com/RQuispeC/pytorch-ACSCP/tree/c83f08632012c2245250ff9c5140814461db575c
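A minimal smoke-test sketch for the compiled G_LargeNew generator above. The (4, 1, 128, 128) input shape is inferred from the size/stride assertions inside `call` (the first stride-2, kernel-6, padding-2 convolution yields (4, 64, 64, 64), and decoder_8 upsamples back to the input resolution); the helper name `_smoke_test_g_large` is illustrative, and a CUDA device plus the generated module above are assumed.

import torch

def _smoke_test_g_large():
    # Parameters must live on the GPU: `call` pins device 0 and launches Triton kernels.
    torch.manual_seed(0)
    model = G_LargeNew().cuda()
    x = torch.rand(4, 1, 128, 128, device='cuda')
    with torch.no_grad():
        y = model(x)  # runs the fused encoder/decoder pipeline in `call`
    # decoder_8 produces a single-channel map at the input resolution.
    assert y.shape == (4, 1, 128, 128)
    return y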
Mean_One
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/a6/ca6icgopvjc6u7go6lxmvi7ptdzs66qydkid5vc57ga6vi2erzsu.py # Topologically Sorted Source Nodes: [mul, sum_1, hidden], Original ATen: [aten.mul, aten.sum, aten.div] # Source node to ATen node mapping: # hidden => div # mul => mul # sum_1 => sum_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %unsqueeze), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %unsqueeze_1), kwargs = {}) triton_poi_fused_div_mul_sum_0 = async_compile.triton('triton_poi_fused_div_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 64 x0 = xindex % 16 x2 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp15 = tmp1 + tmp4 tmp16 = tmp15 + tmp8 tmp17 = tmp16 + tmp12 tmp18 = tmp14 / tmp17 tl.store(out_ptr0 + (x4), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/7m/c7mvqxmsbygbhwwz3nonbbvjqbbvum3cmwg6hr3pmkhq3cffqzsx.py # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] # Source node to ATen node mapping: # tanh => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, sum_1, hidden], Original ATen: [aten.mul, aten.sum, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_mul_sum_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del buf0 del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf2, primals_4, 256, grid=grid(256), stream=stream0) del primals_4 return (buf2, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.nn.functional as F class Linear(nn.Module): def __init__(self, options, weights=None): super(Linear, self).__init__() self.n_in = options['n_in'] self.n_out = options['n_out'] self.layer = nn.Linear(self.n_in, self.n_out) if weights is not None: self.layer.weight = nn.Parameter(self.load(weights)) else: nn.init.xavier_normal_(self.layer.weight) self.fixed = options['fixed'] if self.fixed: self.layer.weight.requires_grad = False def forward(self, input): return self.layer(input) class Mean_One(nn.Module): def __init__(self, options): super(Mean_One, self).__init__() self.layer = Linear(options) def forward(self, h_e, source_mask): hidden = torch.sum(h_e * source_mask[:, :, None], dim=1) / torch.sum( source_mask, dim=1)[:, None] return F.tanh(self.layer(hidden)) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'options': _mock_config(n_in=4, n_out=4, fixed=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 64 x0 = xindex % 16 x2 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp15 = tmp1 + tmp4 tmp16 = tmp15 + tmp8 tmp17 = tmp16 + tmp12 tmp18 = tmp14 / tmp17 tl.store(out_ptr0 + x4, tmp18, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_mul_sum_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del buf0 del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused_tanh_1[grid(256)](buf2, primals_4, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 return buf2, buf2 class Linear(nn.Module): def __init__(self, options, weights=None): super(Linear, self).__init__() self.n_in = options['n_in'] self.n_out = options['n_out'] self.layer = nn.Linear(self.n_in, self.n_out) if weights is not None: self.layer.weight = nn.Parameter(self.load(weights)) else: nn.init.xavier_normal_(self.layer.weight) self.fixed = options['fixed'] if self.fixed: self.layer.weight.requires_grad = False def forward(self, input): return 
self.layer(input) class Mean_OneNew(nn.Module): def __init__(self, options): super(Mean_OneNew, self).__init__() self.layer = Linear(options) def forward(self, input_0, input_1): primals_3 = self.layer.layer.weight primals_4 = self.layer.layer.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
KaiQiangSong/joint_parse_summ
Mean_One
false
8767
[ "BSD-3-Clause" ]
29
5d4a40d9a681bc8b06c847643d810846f3867216
https://github.com/KaiQiangSong/joint_parse_summ/tree/5d4a40d9a681bc8b06c847643d810846f3867216
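A minimal equivalence sketch for the Mean_One entry above, checking the compiled Mean_OneNew against the eager masked-mean formula. The plain dict stands in for the paritybench `_mock_config` object (the Linear wrapper only indexes 'n_in', 'n_out', and 'fixed'); shapes are fixed at (4, 4, 4, 4) by the assertions in `call`, and a CUDA device is assumed.

import torch

def _check_mean_one():
    torch.manual_seed(0)
    options = {'n_in': 4, 'n_out': 4, 'fixed': False}  # plain dict instead of _mock_config
    model = Mean_OneNew(options).cuda()
    h_e = torch.rand(4, 4, 4, 4, device='cuda')
    mask = torch.rand(4, 4, 4, 4, device='cuda')
    out = model(h_e, mask)
    # Eager reference with the same weights: masked mean over dim 1, then tanh(linear(.)).
    hidden = torch.sum(h_e * mask[:, :, None], dim=1) / torch.sum(mask, dim=1)[:, None]
    ref = torch.tanh(model.layer(hidden))
    assert torch.allclose(out, ref, atol=1e-5)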
MmQAHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/vu/cvucantlgh56apzu7xmoy3xdwz7ss3dj46ir2bgdit6cpuv3wzyk.py # Topologically Sorted Source Nodes: [mul, truediv, erf, add, mul_1, u, sub, pow_1, s, add_1, sqrt, x, mul_2, add_2], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.mean, aten.sub, aten.pow, aten.sqrt] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # erf => erf # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # pow_1 => pow_1 # s => mean_1 # sqrt => sqrt # sub => sub # truediv => div # u => mean # x => div_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, 1.4142135623730951), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {}) # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%mul_1, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %mean), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-12), kwargs = {}) # %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %div_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_5), kwargs = {}) triton_per_fused_add_div_erf_mean_mul_pow_sqrt_sub_0 = 
async_compile.triton('triton_per_fused_add_div_erf_mean_mul_pow_sqrt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 8], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_erf_mean_mul_pow_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_erf_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 8 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (8*x0)), xmask, other=0.0) tmp25 = tl.load(in_ptr1 + (r1), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + (r1), None, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp13 = 8.0 tmp14 = tmp12 / tmp13 tmp15 = tmp8 - tmp14 tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tmp21 = tmp20 / tmp13 tmp22 = 1e-12 tmp23 = tmp21 + tmp22 tmp24 = libdevice.sqrt(tmp23) tmp26 = tmp15 / tmp24 tmp27 = tmp25 * tmp26 tmp29 = tmp27 + tmp28 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp14, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + (x0), tmp24, xmask) tl.store(out_ptr0 + (r1 + (8*x0)), tmp29, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (8, ), (1, )) assert_size_stride(primals_5, (8, ), (1, )) assert_size_stride(primals_6, (4, 8), (8, 1)) assert_size_stride(primals_7, (4, ), (1, )) 
with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf3 # reuse buf5 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, truediv, erf, add, mul_1, u, sub, pow_1, s, add_1, sqrt, x, mul_2, add_2], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.mean, aten.sub, aten.pow, aten.sqrt] stream0 = get_raw_stream(0) triton_per_fused_add_div_erf_mean_mul_pow_sqrt_sub_0.run(buf2, buf4, buf0, primals_4, primals_5, buf5, 64, 8, grid=grid(64), stream=stream0) del primals_5 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [pred], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 8), (8, 1), 0), reinterpret_tensor(primals_6, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf6) del primals_7 return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf2, buf4, reinterpret_tensor(buf5, (64, 8), (8, 1), 0), primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn def gelu(x): return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) class LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): super(LayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class MmQAHead(nn.Module): def __init__(self, cfg, ans_size): super(MmQAHead, self).__init__() self.cfg = cfg self.dense0 = nn.Linear(cfg.HSIZE, cfg.HSIZE * 2) self.dense1 = nn.Linear(cfg.HSIZE * 2, ans_size) self.layer_norm = LayerNorm(cfg.HSIZE * 2, eps=1e-12) def forward(self, x_pooled): pred = self.dense1(self.layer_norm(gelu(self.dense0(x_pooled)))) return pred def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'cfg': _mock_config(HSIZE=4), 'ans_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_erf_mean_mul_pow_sqrt_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 8 * x0), xmask, other=0.0) tmp25 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp13 = 8.0 tmp14 = tmp12 / tmp13 tmp15 = tmp8 - tmp14 tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tmp21 = tmp20 / tmp13 tmp22 = 1e-12 tmp23 = tmp21 + tmp22 tmp24 = libdevice.sqrt(tmp23) tmp26 = tmp15 / tmp24 tmp27 = tmp25 * tmp26 tmp29 = tmp27 + tmp28 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp14, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp24, xmask) tl.store(out_ptr0 + (r1 + 8 * x0), tmp29, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (8, 4), (4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (8,), (1,)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (4, 8), (8, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf3 buf5 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_div_erf_mean_mul_pow_sqrt_sub_0[grid(64)](buf2, buf4, buf0, primals_4, primals_5, buf5, 64, 8, XBLOCK=8, num_warps=2, num_stages=1) del primals_5 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 8), ( 8, 1), 0), reinterpret_tensor(primals_6, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf6) del primals_7 return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 
1), 0 ), buf0, buf2, buf4, reinterpret_tensor(buf5, (64, 8), (8, 1), 0 ), primals_6 def gelu(x): return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) class LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): super(LayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class MmQAHeadNew(nn.Module): def __init__(self, cfg, ans_size): super(MmQAHeadNew, self).__init__() self.cfg = cfg self.dense0 = nn.Linear(cfg.HSIZE, cfg.HSIZE * 2) self.dense1 = nn.Linear(cfg.HSIZE * 2, ans_size) self.layer_norm = LayerNorm(cfg.HSIZE * 2, eps=1e-12) def forward(self, input_0): primals_1 = self.dense0.weight primals_2 = self.dense0.bias primals_6 = self.dense1.weight primals_7 = self.dense1.bias primals_4 = self.layer_norm.weight primals_5 = self.layer_norm.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
MILVLG/rosita
MmQAHead
false
8768
[ "Apache-2.0" ]
32
13f7e68350a64b4b5b2c44b9fa4e7448bbe7420c
https://github.com/MILVLG/rosita/tree/13f7e68350a64b4b5b2c44b9fa4e7448bbe7420c
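A minimal equivalence sketch for the MmQAHead entry above: the single persistent-reduction Triton kernel fuses the GELU and LayerNorm steps, so the compiled head should match the eager composition dense1(layer_norm(gelu(dense0(x)))). `types.SimpleNamespace` stands in for the paritybench `_mock_config` (only `cfg.HSIZE` is read), and a CUDA device is assumed.

import types
import torch

def _check_mmqa_head():
    torch.manual_seed(0)
    cfg = types.SimpleNamespace(HSIZE=4)  # only cfg.HSIZE is accessed by __init__
    model = MmQAHeadNew(cfg, ans_size=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    pred = model(x)
    # Eager reference re-using the same parameters; gelu is the module-level helper above.
    ref = model.dense1(model.layer_norm(gelu(model.dense0(x))))
    assert torch.allclose(pred, ref, atol=1e-5)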
DNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/k2/ck2xr6bde5c2we5bhkosztxunllmhw2c6gxyuqs72s72p2i5i3d5.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, 
[XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/tw/ctwfxjakvb4rsmfujxdnrqyz22cpgahyyotdegtjxup5bq3uuyld.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax, exp, log, sub, sub_1, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_2, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_2, %amax), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_per_fused__log_softmax_1 = async_compile.triton('triton_per_fused__log_softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask) ''', device_str='cuda') async_compile.wait(globals()) del 
async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (1, 784), (784, 1)) assert_size_stride(primals_3, (1, ), (1, )) assert_size_stride(primals_4, (1, 1), (1, 1)) assert_size_stride(primals_5, (1, ), (1, )) assert_size_stride(primals_6, (10, 1), (1, 1)) assert_size_stride(primals_7, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 1), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 4, grid=grid(4), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, primals_4, out=buf2) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf3, primals_5, 4, grid=grid(4), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (1, 10), (1, 1), 0), alpha=1, beta=1, out=buf4) del primals_7 buf7 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_per_fused__log_softmax_1.run(buf4, buf7, 4, 10, grid=grid(4), stream=stream0) del buf4 return (buf7, primals_1, buf1, buf3, buf7, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 784), (784, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 784), (784, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((10, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.nn.functional as F import torch.onnx class DNN(nn.Module): def __init__(self, config): super(DNN, self).__init__() self.fc1 = nn.Linear(784, int(config['hidden_layer1'])) self.dropout = nn.Dropout2d(float(config['drop_out'])) self.fc2 = nn.Linear(int(config['hidden_layer1']), int(config[ 'hidden_layer2'])) self.fc = nn.Linear(int(config['hidden_layer2']), 10) def forward(self, x): x = x.view(-1, 28 * 28) x = F.relu(self.fc1(x)) x = self.dropout(x) x = F.relu(self.fc2(x)) x = self.dropout(x) x = self.fc(x) return F.log_softmax(x, dim=1) def get_inputs(): return [torch.rand([4, 784])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_layer1=1, drop_out=0.5, hidden_layer2=1)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_per_fused__log_softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (1, 784), (784, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (1, 1), (1, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (10, 1), (1, 1)) assert_size_stride(primals_7, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 1), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(4)](buf1, primals_3, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf1, primals_4, out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_0[grid(4)](buf3, primals_5, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (1, 10), (1, 1), 0), alpha=1, beta=1, out=buf4) del primals_7 buf7 = empty_strided_cuda((4, 10), (10, 1), torch.float32) triton_per_fused__log_softmax_1[grid(4)](buf4, buf7, 4, 10, XBLOCK= 1, num_warps=2, num_stages=1) del buf4 return buf7, primals_1, buf1, buf3, buf7, primals_6, primals_4 class DNNNew(nn.Module): def __init__(self, config): super(DNNNew, self).__init__() self.fc1 = nn.Linear(784, int(config['hidden_layer1'])) self.dropout = nn.Dropout2d(float(config['drop_out'])) self.fc2 = 
nn.Linear(int(config['hidden_layer1']), int(config[ 'hidden_layer2'])) self.fc = nn.Linear(int(config['hidden_layer2']), 10) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc.weight primals_7 = self.fc.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
AmberLJC/Fluid
DNN
false
8769
[ "Apache-2.0" ]
12
85dee374eb2a1c96fecea83d5484ad83d1739e95
https://github.com/AmberLJC/Fluid/tree/85dee374eb2a1c96fecea83d5484ad83d1739e95
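A quick way to sanity-check a record like the DNN pair above is to run the eager module and the Inductor-compiled wrapper side by side. A minimal sketch, assuming a CUDA device and that DNN and DNNNew from this record are in scope; note that call() hard-codes the traced shapes from get_init_inputs (hidden_layer1=1, hidden_layer2=1), and both modules must be in eval() so dropout is the identity:

import torch

# The assert_size_stride calls in call() pin the hidden sizes to the
# config the graph was traced with, so reuse those values here.
config = {'hidden_layer1': 1, 'drop_out': 0.5, 'hidden_layer2': 1}
eager = DNN(config).cuda().eval()
compiled = DNNNew(config).cuda().eval()
compiled.load_state_dict(eager.state_dict())  # share the same weights

x = torch.rand(4, 784, device='cuda')
torch.testing.assert_close(eager(x), compiled(x), rtol=1e-4, atol=1e-4)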
CLSHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/qs/cqsgbydauqwdluexymrszxq7nqjh3tcbqqiq5ccrsj74pb354tqf.py # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] # Source node to ATen node mapping: # tanh => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/27/c27nz7qakwg4mesqev3mbp5r3cm7x6cfgpenxv4qqlpbpvqn6dpc.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {}) triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ag/cagdooqnmqsu3yijgx2xhwfomhimfdtgaadz5y4zn3ej2iv7onro.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => exp, log, sub_1, sum_1 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_poi_fused__log_softmax_2 = 
async_compile.triton('triton_poi_fused__log_softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, 
out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0) del buf3 return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf4, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.utils import torch.nn.functional as F class CLSHead(nn.Module): def __init__(self, config, init_weights=None): super(CLSHead, self).__init__() self.layer_1 = nn.Linear(config.d_model, config.d_model) self.dropout = nn.Dropout(p=config.dropout) self.layer_2 = nn.Linear(config.d_model, config.n_vocab) if init_weights is not None: self.layer_2.weight = init_weights def forward(self, x): x = self.dropout(torch.tanh(self.layer_1(x))) return F.log_softmax(self.layer_2(x), dim=-1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(d_model=4, dropout=0.5, n_vocab=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(256)](buf2, buf3, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__log_softmax_2[grid(256)](buf3, buf4, 256, XBLOCK= 256, num_warps=4, num_stages=1) del buf3 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf4, primals_4 class CLSHeadNew(nn.Module): def __init__(self, config, init_weights=None): super(CLSHeadNew, self).__init__() self.layer_1 = nn.Linear(config.d_model, config.d_model) self.dropout = nn.Dropout(p=config.dropout) self.layer_2 = nn.Linear(config.d_model, config.n_vocab) if init_weights is not None: self.layer_2.weight = init_weights def forward(self, input_0): primals_1 = self.layer_1.weight primals_2 = self.layer_1.bias primals_4 = self.layer_2.weight primals_5 = self.layer_2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
MSU-MLSys-Lab/CATE
CLSHead
false
8770
[ "Apache-2.0" ]
15
654c393d7df888d2c3f3b90f9e6752faa061157e
https://github.com/MSU-MLSys-Lab/CATE/tree/654c393d7df888d2c3f3b90f9e6752faa061157e
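The notable part of the compiled CLSHead is that log_softmax is lowered to two pointwise kernels rather than a single reduction: triton_poi_fused__log_softmax_1 subtracts the per-row maximum (for numerical stability) and triton_poi_fused__log_softmax_2 subtracts the log-sum-exp of the shifted values. A minimal sketch of the same two-step computation in plain PyTorch, on random data of the traced shape:

import torch

x = torch.rand(4, 4, 4, 4)
# Kernel 1: shift by the row max so exp() cannot overflow.
shifted = x - x.amax(dim=-1, keepdim=True)
# Kernel 2: subtract log-sum-exp of the shifted values.
out = shifted - shifted.exp().sum(dim=-1, keepdim=True).log()
torch.testing.assert_close(out, torch.log_softmax(x, dim=-1))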
FrameAvgPool
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/ot/cot6lx5vmjfaadv4ruppqzp2p72zlmae27x2tfmhqrgu7vixuys2.py # Topologically Sorted Source Nodes: [vis_h], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # vis_h => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = 
(xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/k5/ck5obavbjlmsbp54ahh7lrfvfy6gvhbdjd4qk2zuxhfierldl4uh.py # Topologically Sorted Source Nodes: [vis_h_1], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # vis_h_1 => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%unsqueeze_1, [1, 4], [1, 4]), kwargs = {}) triton_poi_fused_avg_pool2d_1 = async_compile.triton('triton_poi_fused_avg_pool2d_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4), (16, 4, 1)) buf1 = 
reinterpret_tensor(buf0, (4, 4), (4, 1), 0); del buf0 # reuse buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [vis_h], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 16, grid=grid(16), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [vis_h_1], Original ATen: [aten.avg_pool2d] triton_poi_fused_avg_pool2d_1.run(buf1, buf2, 4, grid=grid(4), stream=stream0) return (reinterpret_tensor(buf2, (4, 1), (1, 1), 0), primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0), buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn.parallel import torch.nn as nn import torch.utils.data import torch.backends.cudnn class FrameAvgPool(nn.Module): def __init__(self, cfg): super(FrameAvgPool, self).__init__() input_size = cfg.INPUT_SIZE hidden_size = cfg.HIDDEN_SIZE kernel_size = cfg.KERNEL_SIZE stride = cfg.STRIDE self.vis_conv = nn.Conv1d(input_size, hidden_size, 1, 1) self.avg_pool = nn.AvgPool1d(kernel_size, stride) def forward(self, visual_input): vis_h = torch.relu(self.vis_conv(visual_input)) vis_h = self.avg_pool(vis_h) return vis_h def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'cfg': _mock_config(INPUT_SIZE=4, HIDDEN_SIZE=4, KERNEL_SIZE=4, STRIDE=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn.parallel import torch.nn as nn import torch.utils.data import torch.backends.cudnn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4), (16, 4, 1)) buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16)](buf1, primals_2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) triton_poi_fused_avg_pool2d_1[grid(4)](buf1, buf2, 4, XBLOCK=4, num_warps=1, num_stages=1) return reinterpret_tensor(buf2, (4, 1), (1, 1), 0 ), primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0), buf3 class FrameAvgPoolNew(nn.Module): def __init__(self, cfg): super(FrameAvgPoolNew, self).__init__() input_size = cfg.INPUT_SIZE hidden_size = cfg.HIDDEN_SIZE kernel_size = cfg.KERNEL_SIZE stride = cfg.STRIDE self.vis_conv = nn.Conv1d(input_size, hidden_size, 1, 1) self.avg_pool = nn.AvgPool1d(kernel_size, stride) def forward(self, input_0): primals_1 = self.vis_conv.weight primals_2 = self.vis_conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
EGO4D/episodic-memory
FrameAvgPool
false
8771
[ "MIT" ]
27
2a3464882cd4f665c358c1b05a6397339e33c2e1
https://github.com/EGO4D/episodic-memory/tree/2a3464882cd4f665c358c1b05a6397339e33c2e1
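In the FrameAvgPool record, the pooling step is lowered to triton_poi_fused_avg_pool2d_1, which loads a window of four values, sums them, and multiplies by 0.25 — exactly nn.AvgPool1d(kernel_size=4, stride=4) applied to the length-4 sequence, so the output collapses to a single frame. A minimal sketch of that equivalence, with shapes chosen to match get_inputs after the conv:

import torch
import torch.nn as nn

vis_h = torch.rand(1, 4, 4)  # (batch, hidden_size, sequence length)
pooled = nn.AvgPool1d(kernel_size=4, stride=4)(vis_h)  # -> (1, 4, 1)
# The fused kernel computes the same thing: sum the window, scale by 1/4.
manual = vis_h.sum(dim=-1, keepdim=True) * 0.25
torch.testing.assert_close(pooled, manual)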
ImagePairEncoderV2
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/gu/cguuvdmusg3iocn6ziw6fgubjmklbzkpclz42fndk7k5bkaifo26.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 576 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 9 y1 = (yindex // 9) tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (9*x2) + (225*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_1/inductor_cache/rn/crnyprxxffxsemdl7cafgn5zgu654ifya2btu6egdbzld2lcxxkx.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (1600*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/7b/c7byk5elewxzvvr6v5evo2zscjamrnsn5ts2ihcnqtjhhiazem3q.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 32768 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (3200*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/oo/coomxp5ocb662bt23w2irmsiea3r7ufeijxirvirckjldiznkjm6.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 131072 xnumel = 25 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (6400*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/ig/cigvpzbgbylgj7fl2v3oqw7rwpks6usfb7pqmfzeg7pfx6kgdy7a.py # Topologically Sorted Source Nodes: [imgs], Original ATen: [aten.cat] # Source node to ATen node mapping: # imgs => 
cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2, %sub], 1), kwargs = {}) triton_poi_fused_cat_4 = async_compile.triton('triton_poi_fused_cat_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 147456 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 9 x1 = (xindex // 9) % 4096 x2 = (xindex // 36864) x3 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 3, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x1 + (4096*x0) + (12288*x2)), tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 6, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x1 + (4096*((-3) + x0)) + (12288*x2)), tmp9, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 9, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr0 + (x1 + (4096*((-6) + x0)) + (12288*x2)), tmp11, eviction_policy='evict_last', other=0.0) tmp15 = tl.load(in_ptr1 + (x1 + (4096*((-6) + x0)) + (12288*x2)), tmp11, eviction_policy='evict_last', other=0.0) tmp16 = tmp14 - tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp11, tmp16, tmp17) tmp19 = tl.where(tmp9, tmp10, tmp18) tmp20 = tl.where(tmp4, tmp5, tmp19) tl.store(out_ptr0 + (x3), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/4t/c4tx3z2ql4pk7izbzhcairjw2duac45ipbpzqu5hort32w6lcajl.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_3, %primals_4, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_5 = 
async_compile.triton('triton_poi_fused_convolution_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/zc/czciwluosnogs637gomjqhyzguobsybqtuj6ibgobwhvtwtcxqps.py # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_5, %primals_6, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 
'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 86528 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/62/c6276nywkcm6i2xbljwpgunw5p2r3l64gcvadbcjiaz4wj23g72i.py # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # x_2 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_7, %primals_8, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/6m/c6mk7octwof27xsicdmycphjbe2xflmbg6iblspnojg3zovxak2e.py # Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # x_3 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_9, %primals_10, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_8 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_8(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_3, (64, 9, 5, 5), (225, 25, 5, 1)) 
assert_size_stride(primals_4, (64, ), (1, )) assert_size_stride(primals_5, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_6, (128, ), (1, )) assert_size_stride(primals_7, (256, 128, 5, 5), (3200, 25, 5, 1)) assert_size_stride(primals_8, (256, ), (1, )) assert_size_stride(primals_9, (512, 256, 5, 5), (6400, 25, 5, 1)) assert_size_stride(primals_10, (512, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 9, 5, 5), (225, 1, 45, 9), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_3, buf0, 576, 25, grid=grid(576, 25), stream=stream0) del primals_3 buf1 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_5, buf1, 8192, 25, grid=grid(8192, 25), stream=stream0) del primals_5 buf2 = empty_strided_cuda((256, 128, 5, 5), (3200, 1, 640, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_7, buf2, 32768, 25, grid=grid(32768, 25), stream=stream0) del primals_7 buf3 = empty_strided_cuda((512, 256, 5, 5), (6400, 1, 1280, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_9, buf3, 131072, 25, grid=grid(131072, 25), stream=stream0) del primals_9 buf4 = empty_strided_cuda((4, 9, 64, 64), (36864, 1, 576, 9), torch.float32) # Topologically Sorted Source Nodes: [imgs], Original ATen: [aten.cat] triton_poi_fused_cat_4.run(primals_1, primals_2, buf4, 147456, grid=grid(147456), stream=stream0) del primals_1 del primals_2 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 64, 30, 30), (57600, 1, 1920, 64)) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_5.run(buf6, primals_4, 230400, grid=grid(230400), stream=stream0) del primals_4 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(buf6, buf1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 128, 13, 13), (21632, 1, 1664, 128)) buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_6.run(buf8, primals_6, 86528, grid=grid(86528), stream=stream0) del primals_6 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(buf8, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 256, 5, 5), (6400, 1, 1280, 256)) buf10 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_7.run(buf10, primals_8, 25600, grid=grid(25600), stream=stream0) del primals_8 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf11 = extern_kernels.convolution(buf10, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 
512, 1, 1), (512, 1, 512, 512)) buf12 = reinterpret_tensor(buf11, (4, 512, 1, 1), (512, 1, 2048, 2048), 0); del buf11 # reuse buf13 = empty_strided_cuda((4, 512, 1, 1), (512, 1, 512, 512), torch.bool) # Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_8.run(buf12, primals_10, buf13, 2048, grid=grid(2048), stream=stream0) del primals_10 return (reinterpret_tensor(buf12, (4, 512), (512, 1), 0), buf0, buf1, buf2, buf3, buf4, buf6, buf8, buf10, buf13, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((64, 9, 5, 5), (225, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((256, 128, 5, 5), (3200, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((512, 256, 5, 5), (6400, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.nn.functional as F


class ImagePairEncoderV2(nn.Module):

    def __init__(self, init_scale=1.0, bias=True, no_weight_init=False):
        super(ImagePairEncoderV2, self).__init__()
        self.conv1 = nn.Conv2d(9, 64, kernel_size=5, stride=2, bias=bias)
        self.conv2 = nn.Conv2d(64, 128, kernel_size=5, stride=2, bias=bias)
        self.conv3 = nn.Conv2d(128, 256, kernel_size=5, stride=2, bias=bias)
        self.conv4 = nn.Conv2d(256, 512, kernel_size=5, stride=1, bias=bias)
        if not no_weight_init:
            for layer in (self.conv1, self.conv2, self.conv3, self.conv4):
                nn.init.orthogonal_(layer.weight, init_scale)

    def forward(self, src_imgs, dst_imgs):
        imgs = torch.cat([src_imgs, dst_imgs, src_imgs - dst_imgs], dim=1)
        x = F.relu(self.conv1(imgs))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        return x.view(x.size(0), -1)


def get_inputs():
    return [torch.rand([4, 3, 64, 64]), torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 576 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 9 y1 = yindex // 9 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 9 * x2 + 225 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 256 * x2 + 6400 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_cat_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 9 x1 = xindex // 9 % 4096 x2 = xindex // 36864 x3 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 3, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x1 + 4096 * x0 + 12288 * x2), tmp4, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 6, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x1 + 4096 * (-3 + x0) + 12288 * x2), tmp9, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 9, tl.int64) tmp14 = tl.load(in_ptr0 + (x1 + 4096 * (-6 + x0) + 12288 * x2), tmp11, eviction_policy='evict_last', other=0.0) tmp15 = tl.load(in_ptr1 + (x1 + 4096 * (-6 + x0) + 12288 * x2), tmp11, eviction_policy='evict_last', other=0.0) tmp16 = tmp14 - tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp11, tmp16, tmp17) tmp19 = tl.where(tmp9, tmp10, tmp18) tmp20 = tl.where(tmp4, tmp5, tmp19) tl.store(out_ptr0 + x3, tmp20, None) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 86528 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_8(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_3, (64, 9, 5, 5), (225, 25, 5, 1)) assert_size_stride(primals_4, (64,), (1,)) 
assert_size_stride(primals_5, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_6, (128,), (1,)) assert_size_stride(primals_7, (256, 128, 5, 5), (3200, 25, 5, 1)) assert_size_stride(primals_8, (256,), (1,)) assert_size_stride(primals_9, (512, 256, 5, 5), (6400, 25, 5, 1)) assert_size_stride(primals_10, (512,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 9, 5, 5), (225, 1, 45, 9), torch.float32 ) get_raw_stream(0) triton_poi_fused_0[grid(576, 25)](primals_3, buf0, 576, 25, XBLOCK= 32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf1 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) triton_poi_fused_1[grid(8192, 25)](primals_5, buf1, 8192, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_5 buf2 = empty_strided_cuda((256, 128, 5, 5), (3200, 1, 640, 128), torch.float32) triton_poi_fused_2[grid(32768, 25)](primals_7, buf2, 32768, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_7 buf3 = empty_strided_cuda((512, 256, 5, 5), (6400, 1, 1280, 256), torch.float32) triton_poi_fused_3[grid(131072, 25)](primals_9, buf3, 131072, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_9 buf4 = empty_strided_cuda((4, 9, 64, 64), (36864, 1, 576, 9), torch .float32) triton_poi_fused_cat_4[grid(147456)](primals_1, primals_2, buf4, 147456, XBLOCK=512, num_warps=8, num_stages=1) del primals_1 del primals_2 buf5 = extern_kernels.convolution(buf4, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 64, 30, 30), (57600, 1, 1920, 64)) buf6 = buf5 del buf5 triton_poi_fused_convolution_relu_5[grid(230400)](buf6, primals_4, 230400, XBLOCK=512, num_warps=8, num_stages=1) del primals_4 buf7 = extern_kernels.convolution(buf6, buf1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 128, 13, 13), (21632, 1, 1664, 128)) buf8 = buf7 del buf7 triton_poi_fused_convolution_relu_6[grid(86528)](buf8, primals_6, 86528, XBLOCK=512, num_warps=8, num_stages=1) del primals_6 buf9 = extern_kernels.convolution(buf8, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 256, 5, 5), (6400, 1, 1280, 256)) buf10 = buf9 del buf9 triton_poi_fused_convolution_relu_7[grid(25600)](buf10, primals_8, 25600, XBLOCK=128, num_warps=4, num_stages=1) del primals_8 buf11 = extern_kernels.convolution(buf10, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 512, 1, 1), (512, 1, 512, 512)) buf12 = reinterpret_tensor(buf11, (4, 512, 1, 1), (512, 1, 2048, 2048), 0) del buf11 buf13 = empty_strided_cuda((4, 512, 1, 1), (512, 1, 512, 512), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_8[grid(2048)]( buf12, primals_10, buf13, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_10 return reinterpret_tensor(buf12, (4, 512), (512, 1), 0 ), buf0, buf1, buf2, buf3, buf4, buf6, buf8, buf10, buf13 class ImagePairEncoderV2New(nn.Module): def __init__(self, init_scale=1.0, bias=True, no_weight_init=False): super(ImagePairEncoderV2New, self).__init__() self.conv1 = nn.Conv2d(9, 64, kernel_size=5, stride=2, bias=bias) self.conv2 = nn.Conv2d(64, 128, kernel_size=5, stride=2, bias=bias) self.conv3 = nn.Conv2d(128, 256, kernel_size=5, stride=2, bias=bias) 
self.conv4 = nn.Conv2d(256, 512, kernel_size=5, stride=1, bias=bias) if not no_weight_init: for layer in (self.conv1, self.conv2, self.conv3, self.conv4): nn.init.orthogonal_(layer.weight, init_scale) def forward(self, input_0, input_1): primals_3 = self.conv1.weight primals_4 = self.conv1.bias primals_5 = self.conv2.weight primals_6 = self.conv2.bias primals_7 = self.conv3.weight primals_8 = self.conv3.bias primals_9 = self.conv4.weight primals_10 = self.conv4.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
KH-Kyle/rmp_nav
ImagePairEncoderV2
false
8,772
[ "MIT" ]
30
d598fe70664a4cdc0e9b9dd4b52e84aa3de1b551
https://github.com/KH-Kyle/rmp_nav/tree/d598fe70664a4cdc0e9b9dd4b52e84aa3de1b551
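The eager ImagePairEncoderV2 above and the Inductor-generated ImagePairEncoderV2New are meant to be interchangeable on the forward pass: the compiled call() fuses the channel concatenation, bias adds, and ReLUs into Triton kernels and routes the convolutions through extern_kernels.convolution. A minimal sanity-check sketch, not part of the generated artifact, assuming a CUDA device and that the definitions above are in scope:

import torch

# Build both models and copy one set of weights across so the comparison is exact.
eager = ImagePairEncoderV2().cuda().eval()
compiled = ImagePairEncoderV2New().cuda().eval()
compiled.load_state_dict(eager.state_dict())

src_imgs, dst_imgs = (t.cuda() for t in get_inputs())
with torch.no_grad():
    ref = eager(src_imgs, dst_imgs)    # (4, 512); spatial sizes shrink 64 -> 30 -> 13 -> 5 -> 1
    out = compiled(src_imgs, dst_imgs) # same result via the fused Triton path
print(torch.allclose(ref, out, atol=1e-5))  # expected: True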
ImageEncoderV3
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/2e/c2ejbndsydgdlx3t7hlr3yp3fq4trt3vlqbekgbxd5e267ve6i24.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 192 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (75*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_1/inductor_cache/az/cazvf33aclbntgyixs3zlm6bdzs672xtry2xl6pc3l3sjzwrnks5.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 12 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/3f/c3fce2ynxiwmidqobqqnujkyvufqfa2rcv2e6tznaak5yni6z55l.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (1600*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/gx/cgx4sut6abwxzyemm34tpaqg74v5l3fdxcd2cmbfp63igpeaohu5.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 32768 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (3200*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/4b/c4bbntm3vvfukoqx7ztbcvw7vtgn23o66sboceuefvfddnxreelt.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', ''' import triton 
import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 131072 xnumel = 25 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (6400*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/4t/c4tx3z2ql4pk7izbzhcairjw2duac45ipbpzqu5hort32w6lcajl.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 
'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/zc/czciwluosnogs637gomjqhyzguobsybqtuj6ibgobwhvtwtcxqps.py # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 86528 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 
= tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/62/c6276nywkcm6i2xbljwpgunw5p2r3l64gcvadbcjiaz4wj23g72i.py # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # x_2 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/6m/c6mk7octwof27xsicdmycphjbe2xflmbg6iblspnojg3zovxak2e.py # Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # x_3 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) # %le : [num_users=1] = 
call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_8 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_8(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (64, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_5, (128, ), (1, )) assert_size_stride(primals_6, (256, 128, 5, 5), (3200, 25, 5, 1)) assert_size_stride(primals_7, (256, ), (1, )) assert_size_stride(primals_8, (512, 256, 5, 5), (6400, 25, 5, 1)) assert_size_stride(primals_9, (512, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 5, 5), (75, 1, 15, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 192, 25, grid=grid(192, 25), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0) del primals_3 buf2 = empty_strided_cuda((128, 64, 5, 5), 
(1600, 1, 320, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_4, buf2, 8192, 25, grid=grid(8192, 25), stream=stream0) del primals_4 buf3 = empty_strided_cuda((256, 128, 5, 5), (3200, 1, 640, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_6, buf3, 32768, 25, grid=grid(32768, 25), stream=stream0) del primals_6 buf4 = empty_strided_cuda((512, 256, 5, 5), (6400, 1, 1280, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_8, buf4, 131072, 25, grid=grid(131072, 25), stream=stream0) del primals_8 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 64, 30, 30), (57600, 1, 1920, 64)) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_5.run(buf6, primals_2, 230400, grid=grid(230400), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(buf6, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 128, 13, 13), (21632, 1, 1664, 128)) buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_6.run(buf8, primals_5, 86528, grid=grid(86528), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(buf8, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 256, 5, 5), (6400, 1, 1280, 256)) buf10 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_7.run(buf10, primals_7, 25600, grid=grid(25600), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf11 = extern_kernels.convolution(buf10, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 512, 1, 1), (512, 1, 512, 512)) buf12 = reinterpret_tensor(buf11, (4, 512, 1, 1), (512, 1, 2048, 2048), 0); del buf11 # reuse buf13 = empty_strided_cuda((4, 512, 1, 1), (512, 1, 512, 512), torch.bool) # Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_8.run(buf12, primals_9, buf13, 2048, grid=grid(2048), stream=stream0) del primals_9 return (reinterpret_tensor(buf12, (4, 512), (512, 1), 0), buf0, buf1, buf2, buf3, buf4, buf6, buf8, buf10, buf13, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) 
primals_4 = rand_strided((128, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((256, 128, 5, 5), (3200, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((512, 256, 5, 5), (6400, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.nn.functional as F


class ImageEncoderV3(nn.Module):

    def __init__(self, output_dim=512, init_scale=1.0, residual_link=False):
        super(ImageEncoderV3, self).__init__()
        self.residual_link = residual_link
        self.conv1 = nn.Conv2d(3, output_dim // 8, kernel_size=5, stride=2)
        if residual_link:
            self.res_fc1 = nn.Conv2d(output_dim // 8, output_dim // 4,
                kernel_size=1, stride=2)
        self.conv2 = nn.Conv2d(output_dim // 8, output_dim // 4,
            kernel_size=5, stride=2)
        if residual_link:
            self.res_fc2 = nn.Conv2d(output_dim // 4, output_dim // 2,
                kernel_size=1, stride=2)
        self.conv3 = nn.Conv2d(output_dim // 4, output_dim // 2,
            kernel_size=5, stride=2)
        if residual_link:
            self.res_fc3 = nn.Conv2d(output_dim // 2, output_dim,
                kernel_size=1, stride=1)
        self.conv4 = nn.Conv2d(output_dim // 2, output_dim, kernel_size=5,
            stride=1)
        for layer in (self.conv1, self.conv2, self.conv3, self.conv4):
            nn.init.orthogonal_(layer.weight, init_scale)

    def forward(self, imgs):
        if self.residual_link:
            x = F.relu(self.conv1(imgs))
            x = F.relu(self.res_fc1(x[:, :, 2:-2, 2:-2]) + self.conv2(x))
            x = F.relu(self.res_fc2(x[:, :, 2:-2, 2:-2]) + self.conv3(x))
            x = F.relu(self.res_fc3(x[:, :, 2:-2, 2:-2]) + self.conv4(x))
        else:
            x = F.relu(self.conv1(imgs))
            x = F.relu(self.conv2(x))
            x = F.relu(self.conv3(x))
            x = F.relu(self.conv4(x))
        return x.view(x.size(0), -1)


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 192 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 75 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 256 * x2 + 6400 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 86528 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_8(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (64, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (256, 128, 5, 5), (3200, 25, 5, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (512, 256, 5, 5), (6400, 25, 5, 1)) assert_size_stride(primals_9, (512,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 5, 5), (75, 1, 15, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(192, 25)](primals_1, buf0, 192, 25, XBLOCK= 32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 
3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) triton_poi_fused_2[grid(8192, 25)](primals_4, buf2, 8192, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((256, 128, 5, 5), (3200, 1, 640, 128), torch.float32) triton_poi_fused_3[grid(32768, 25)](primals_6, buf3, 32768, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((512, 256, 5, 5), (6400, 1, 1280, 256), torch.float32) triton_poi_fused_4[grid(131072, 25)](primals_8, buf4, 131072, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_8 buf5 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 64, 30, 30), (57600, 1, 1920, 64)) buf6 = buf5 del buf5 triton_poi_fused_convolution_relu_5[grid(230400)](buf6, primals_2, 230400, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf7 = extern_kernels.convolution(buf6, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 128, 13, 13), (21632, 1, 1664, 128)) buf8 = buf7 del buf7 triton_poi_fused_convolution_relu_6[grid(86528)](buf8, primals_5, 86528, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf9 = extern_kernels.convolution(buf8, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 256, 5, 5), (6400, 1, 1280, 256)) buf10 = buf9 del buf9 triton_poi_fused_convolution_relu_7[grid(25600)](buf10, primals_7, 25600, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf11 = extern_kernels.convolution(buf10, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 512, 1, 1), (512, 1, 512, 512)) buf12 = reinterpret_tensor(buf11, (4, 512, 1, 1), (512, 1, 2048, 2048), 0) del buf11 buf13 = empty_strided_cuda((4, 512, 1, 1), (512, 1, 512, 512), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_8[grid(2048)]( buf12, primals_9, buf13, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 return reinterpret_tensor(buf12, (4, 512), (512, 1), 0 ), buf0, buf1, buf2, buf3, buf4, buf6, buf8, buf10, buf13 class ImageEncoderV3New(nn.Module): def __init__(self, output_dim=512, init_scale=1.0, residual_link=False): super(ImageEncoderV3New, self).__init__() self.residual_link = residual_link self.conv1 = nn.Conv2d(3, output_dim // 8, kernel_size=5, stride=2) if residual_link: self.res_fc1 = nn.Conv2d(output_dim // 8, output_dim // 4, kernel_size=1, stride=2) self.conv2 = nn.Conv2d(output_dim // 8, output_dim // 4, kernel_size=5, stride=2) if residual_link: self.res_fc2 = nn.Conv2d(output_dim // 4, output_dim // 2, kernel_size=1, stride=2) self.conv3 = nn.Conv2d(output_dim // 4, output_dim // 2, kernel_size=5, stride=2) if residual_link: self.res_fc3 = nn.Conv2d(output_dim // 2, output_dim, kernel_size=1, stride=1) self.conv4 = nn.Conv2d(output_dim // 2, output_dim, kernel_size=5, stride=1) for layer in (self.conv1, self.conv2, self.conv3, self.conv4): nn.init.orthogonal_(layer.weight, init_scale) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = 
self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
KH-Kyle/rmp_nav
ImageEncoderV3
false
8,773
[ "MIT" ]
30
d598fe70664a4cdc0e9b9dd4b52e84aa3de1b551
https://github.com/KH-Kyle/rmp_nav/tree/d598fe70664a4cdc0e9b9dd4b52e84aa3de1b551
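ImageEncoderV3 follows the same pattern, with one caveat worth noting: the compiled graph above was traced with the default residual_link=False, so only that configuration corresponds to call(); the res_fc* branches never appear in the kernel plan. A hedged check sketch under the same assumptions (CUDA device available, the definitions above in scope):

import torch

eager = ImageEncoderV3(output_dim=512).cuda().eval()
compiled = ImageEncoderV3New(output_dim=512).cuda().eval()
compiled.load_state_dict(eager.state_dict())

imgs, = (t.cuda() for t in get_inputs())  # a single (4, 3, 64, 64) batch
with torch.no_grad():
    # Both return a flattened (4, 512) embedding.
    print(torch.allclose(eager(imgs), compiled(imgs), atol=1e-5))  # expected: True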