Dataset schema, reconstructed from the per-column viewer summaries
(stringlengths/listlengths report the minimum and maximum observed lengths;
int64 columns report the minimum and maximum observed values; k = thousands):

    column                 type     observed range
    entry_point            string   lengths 1 to 65
    original_triton_code   string   lengths 4.5k to 619k
    python_code            string   lengths 208 to 60.9k
    triton_code            string   lengths 1.15k to 275k
    repo_name              string   lengths 7 to 115
    module_name            string   lengths 1 to 65
    synthetic              bool     1 class (single distinct value)
    uuid                   int64    0 to 18.5k
    licenses               list     lengths 1 to 6
    stars                  int64    0 to 19.8k
    sha                    string   lengths 40 to 40
    repo_link              string   lengths 72 to 180
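A minimal sketch of reading one row with the datasets library; the Hub path below is a placeholder, not the dataset's actual id:

    # Placeholder Hub id; substitute the real dataset path.
    from datasets import load_dataset

    ds = load_dataset('example-org/pytorch-triton-pairs', split='train')
    row = ds[0]
    print(row['entry_point'], row['repo_name'], row['stars'])
    print(row['python_code'][:200])   # original eager PyTorch module source
    print(row['triton_code'][:200])   # cleaned Inductor-generated Triton code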
entry_point: DilatedResidualLayer

original_triton_code:
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/fk/cfkcunh3plyysuvib63zgkougyqv2ia22pa4qcifvxy3tij7w7nx.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex 
// 4) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/np/cnpq67ju2jjvmhdlii5rqv3ajv3tl7ugd3lald4s6jzn2wy4gvbv.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %squeeze_1), kwargs = {}) triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4), (16, 4, 1)) buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0); del buf0 # reuse buf4 = empty_strided_cuda((4, 
4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf4, 16, grid=grid(16), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 4), (0, 4, 1), 0), primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (1, 4, 4), (16, 4, 1)) buf3 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] triton_poi_fused_add_1.run(buf3, primals_3, primals_5, 16, grid=grid(16), stream=stream0) del primals_5 return (buf3, primals_1, primals_4, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (1, 4, 4), (16, 4, 1), 0), buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3), (12, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
import torch.nn as nn
import torch.nn.functional as F


class DilatedResidualLayer(nn.Module):

    def __init__(self, dilation, in_channels, out_channels):
        super(DilatedResidualLayer, self).__init__()
        self.conv_dilated = nn.Conv1d(in_channels, out_channels, 3,
                                      padding=dilation, dilation=dilation)
        self.conv_1x1 = nn.Conv1d(out_channels, out_channels, 1)
        self.dropout = nn.Dropout()

    def forward(self, x):
        out = F.relu(self.conv_dilated(x))
        out = self.conv_1x1(out)
        out = self.dropout(out)
        return x + out


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'dilation': 1, 'in_channels': 4, 'out_channels': 4}]


triton_code:
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4), (16, 4, 1)) buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0) del buf0 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16)](buf1, primals_2, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 4 ), (0, 4, 1), 0), primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (1, 4, 4), (16, 4, 1)) buf3 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0) del buf2 triton_poi_fused_add_1[grid(16)](buf3, primals_3, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 return buf3, primals_1, primals_4, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (1, 4, 4), (16, 4, 1), 0 ), buf4 class DilatedResidualLayerNew(nn.Module): def __init__(self, dilation, in_channels, out_channels): super(DilatedResidualLayerNew, self).__init__() self.conv_dilated = nn.Conv1d(in_channels, out_channels, 3, padding =dilation, dilation=dilation) self.conv_1x1 = nn.Conv1d(out_channels, out_channels, 1) self.dropout = nn.Dropout() def forward(self, input_0): primals_1 = self.conv_dilated.weight primals_2 = self.conv_dilated.bias primals_4 = self.conv_1x1.weight primals_5 = self.conv_1x1.bias primals_3 = input_0 output = 
call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
repo_name: cmhungsteve/SSTDA
module_name: DilatedResidualLayer
synthetic: false
uuid: 15,045
licenses: [ "MIT" ]
stars: 154
sha: 9c5e1df952bd122ea474046d91e3ac6fa79ec312
repo_link: https://github.com/cmhungsteve/SSTDA/tree/9c5e1df952bd122ea474046d91e3ac6fa79ec312
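Since DilatedResidualLayerNew reuses the original parameter names, the two class definitions above can be cross-checked numerically. A minimal sketch, assuming a CUDA device (the generated call hard-codes cuda:0), a recent PyTorch build (unbatched Conv1d input), and eval mode so that dropout is the identity:

    import torch

    eager = DilatedResidualLayer(dilation=1, in_channels=4, out_channels=4).cuda().eval()
    fused = DilatedResidualLayerNew(dilation=1, in_channels=4, out_channels=4).cuda().eval()
    fused.load_state_dict(eager.state_dict())  # identical parameter names, so weights transfer

    x = torch.rand(4, 4, device='cuda')
    with torch.no_grad():
        torch.testing.assert_close(eager(x), fused(x))  # dropout is identity in eval mode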
entry_point: MeanEmbedding

original_triton_code:
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/zk/czki36zeblnkviybg6yezzyo5v2j46umqqhgr5ea6dl6n3lryo7s.py # Topologically Sorted Source Nodes: [summed, truediv], Original ATen: [aten.sum, aten.div] # Source node to ATen node mapping: # summed => sum_1 # truediv => div # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [-2]), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %expand), kwargs = {}) triton_poi_fused_div_sum_0 = async_compile.triton('triton_poi_fused_div_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = 
tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp7 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [summed, truediv], Original ATen: [aten.sum, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_sum_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
import torch.nn as nn
import torch.multiprocessing
import torch.utils.data
import torch.nn.modules.loss


class MeanEmbedding(nn.Module):
    """Mean embedding class."""

    def __init__(self):
        super(MeanEmbedding, self).__init__()

    def forward(self, emb, len_):
        """Compute average embeddings.

        Parameters
        ----------
        emb : torch.Tensor
            The input embedding tensor.
        len_ : torch.Tensor
            The sequence length tensor.

        Returns
        -------
        torch.Tensor
            The average embedding tensor.
        """
        summed = torch.sum(emb, dim=-2)
        len_ = len_.unsqueeze(-1).expand_as(summed).float()
        return summed / len_


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]


triton_code:
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.multiprocessing import torch.utils.data import torch.nn.modules.loss assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp7 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, class MeanEmbeddingNew(nn.Module): """Mean embedding class.""" def __init__(self): super(MeanEmbeddingNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
repo_name: cminusQAQ/graph4nlp
module_name: MeanEmbedding
synthetic: false
uuid: 15,046
licenses: [ "Apache-2.0" ]
stars: 1,269
sha: d980e897131f1b9d3766750c06316d94749904fa
repo_link: https://github.com/cminusQAQ/graph4nlp/tree/d980e897131f1b9d3766750c06316d94749904fa
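The fused kernel above unrolls the sum over the length-4 axis (the four loads at offsets 0, 4, 8, 12 within each 16-element slice) and divides by the broadcast length tensor. An eager restatement of the same computation, as a sketch rather than dataset content:

    import torch

    emb = torch.rand(4, 4, 4, 4, device='cuda')
    len_ = torch.rand(4, 4, device='cuda')

    summed = emb.sum(dim=-2)                # the four unrolled loads, added pairwise
    expected = summed / len_.unsqueeze(-1)  # tmp6 / tmp7, with len_ broadcast over the last dim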
entry_point: SoftDiceLoss

original_triton_code:
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/gd/cgdtd7ki7lurypoeyfwjebdfquygdeupjef4ltfbbbdk5u7owcpl.py # Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # intersection => mul # sum_1 => sum_1 # sum_2 => sum_2 # sum_3 => sum_3 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view, [1]), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [1]), kwargs = {}) triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tl.store(out_ptr0 + (x0), tmp7, xmask) tl.store(out_ptr1 + (x0), tmp11, xmask) tl.store(out_ptr2 + (x0), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/vq/cvqiixp4wmb73ig2cla6idbqq7i6vd5n3qmdluadrv32f52pdgw3.py # Topologically Sorted Source Nodes: [add, mul_1, add_1, add_2, score, sum_4, truediv_1, score_1], Original ATen: [aten.add, aten.mul, aten.div, aten.sum, aten.rsub] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # mul_1 => mul_1 # score => div # score_1 => sub # sum_4 => sum_4 # truediv_1 => div_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 2.0), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_2), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%div,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_4, 4), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_1), kwargs = {}) triton_per_fused_add_div_mul_rsub_sum_1 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': 
False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp5 = tl.load(in_ptr1 + (r0), None) tmp6 = tl.load(in_ptr2 + (r0), None) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp7 + tmp1 tmp9 = tmp4 / tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp13 = 0.25 tmp14 = tmp12 * tmp13 tmp15 = tmp1 - tmp14 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp15, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.float32) buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) buf2 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_mul_sum_0.run(arg1_1, arg0_1, buf0, buf1, buf2, 4, 64, grid=grid(4), stream=stream0) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [add, mul_1, add_1, add_2, score, sum_4, truediv_1, score_1], Original ATen: [aten.add, aten.mul, aten.div, aten.sum, aten.rsub] triton_per_fused_add_div_mul_rsub_sum_1.run(buf4, buf0, buf1, buf2, 1, 4, grid=grid(1), stream=stream0) del buf0 del buf1 del buf2 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
import torch.nn.functional as F
import torch.nn as nn


class SoftDiceLoss(nn.Module):

    def __init__(self, weight=None, size_average=True):
        super(SoftDiceLoss, self).__init__()

    def forward(self, logits, targets):
        num = targets.size(0)
        probs = F.sigmoid(logits)
        m1 = probs.view(num, -1)
        m2 = targets.view(num, -1)
        intersection = m1 * m2
        score = 2.0 * (intersection.sum(1) + 1) / (m1.sum(1) + m2.sum(1) + 1)
        score = 1 - score.sum() / num
        return score


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]


triton_code:
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tl.store(out_ptr0 + x0, tmp7, xmask) tl.store(out_ptr1 + x0, tmp11, xmask) tl.store(out_ptr2 + x0, tmp15, xmask) @triton.jit def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r0, None) tmp6 = tl.load(in_ptr2 + r0, None) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp7 + tmp1 tmp9 = tmp4 / tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp13 = 0.25 tmp14 = tmp12 * tmp13 tmp15 = tmp1 - tmp14 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) buf1 = empty_strided_cuda((4,), (1,), torch.float32) buf2 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_mul_sum_0[grid(4)](arg1_1, arg0_1, buf0, buf1, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf4, buf0, buf1, buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 del buf2 return buf4, class SoftDiceLossNew(nn.Module): def __init__(self, weight=None, size_average=True): super(SoftDiceLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
repo_name: cmarasinou/carvana-challenge
module_name: SoftDiceLoss
synthetic: false
uuid: 15,047
licenses: [ "MIT" ]
stars: 93
sha: 4e1c43f306cfbef1df267acfce59bdcf19504850
repo_link: https://github.com/cmarasinou/carvana-challenge/tree/4e1c43f306cfbef1df267acfce59bdcf19504850
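Written out, with p the sigmoid of the flattened logits and t the flattened targets, the two kernels together evaluate the smoothed soft Dice loss (smoothing constant 1; the 0.25 factor in the second kernel is 1/N for batch size N = 4):

    % Soft Dice loss computed by the two fused kernels
    \mathcal{L}_{\mathrm{dice}}
      = 1 - \frac{1}{N}\sum_{n=1}^{N}
        \frac{2\left(\sum_{i} p_{n,i}\, t_{n,i} + 1\right)}
             {\sum_{i} p_{n,i} + \sum_{i} t_{n,i} + 1},
    \qquad p_{n,i} = \sigma(x_{n,i})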
entry_point: InnerProductDecoder

original_triton_code:
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/mb/cmb72vxh36b4k6lvmt4562lj3nrqtpyzst2qbon2yqx22gdjfa7x.py # Topologically Sorted Source Nodes: [adj], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # adj => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mm,), kwargs = {}) triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tl.store(in_out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() 
assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm] extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0) del arg0_1 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [adj], Original ATen: [aten.sigmoid] stream0 = get_raw_stream(0) triton_poi_fused_sigmoid_0.run(buf1, 16, grid=grid(16), stream=stream0) return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.multiprocessing
import torch.utils.data
import torch.nn.modules.loss


class InnerProductDecoder(nn.Module):
    """Decoder for using inner product for prediction."""

    def __init__(self, dropout, act=torch.sigmoid):
        super(InnerProductDecoder, self).__init__()
        self.dropout = dropout
        self.act = act

    def forward(self, z):
        z = F.dropout(z, self.dropout, training=self.training)
        adj = self.act(torch.mm(z, z.t()))
        return adj


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'dropout': 0.5}]


triton_code:
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.multiprocessing import torch.utils.data import torch.nn.modules.loss assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tl.store(in_out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0) del arg0_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_sigmoid_0[grid(16)](buf1, 16, XBLOCK=16, num_warps =1, num_stages=1) return buf1, class InnerProductDecoderNew(nn.Module): """Decoder for using inner product for prediction.""" def __init__(self, dropout, act=torch.sigmoid): super(InnerProductDecoderNew, self).__init__() self.dropout = dropout self.act = act def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
repo_name: cminusQAQ/graph4nlp
module_name: InnerProductDecoder
synthetic: false
uuid: 15,048
licenses: [ "Apache-2.0" ]
stars: 1,269
sha: d980e897131f1b9d3766750c06316d94749904fa
repo_link: https://github.com/cminusQAQ/graph4nlp/tree/d980e897131f1b9d3766750c06316d94749904fa
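Because this is an inference-mode trace, F.dropout falls away and the whole decoder reduces to a matmul against a transposed view of the same tensor plus one pointwise sigmoid kernel. An eager restatement, as a sketch:

    import torch

    z = torch.rand(4, 4, device='cuda')
    adj = torch.sigmoid(z @ z.t())  # extern mm kernel, then triton_poi_fused_sigmoid_0 in place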
entry_point: BCELoss2d

original_triton_code:
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/jl/cjlfb7i5sda6iuixuufqhuq6ofisyq3co2j2rvs4i66aznhxymc2.py # Topologically Sorted Source Nodes: [binary_cross_entropy], Original ATen: [aten.binary_cross_entropy] # Source node to ATen node mapping: # binary_cross_entropy => full_default, full_default_1, log, log1p, maximum, maximum_1, mean, mul, mul_1, neg, sub, sub_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, 1), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%view,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %maximum), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%view,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %maximum_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_1,), kwargs = {}) triton_per_fused_binary_cross_entropy_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_binary_cross_entropy_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = -tmp4 tmp6 = libdevice.log1p(tmp5) tmp7 = -100.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp2 * tmp8 tmp10 = tl_math.log(tmp4) tmp11 = triton_helpers.maximum(tmp10, tmp7) tmp12 = tmp0 * tmp11 tmp13 = tmp9 - tmp12 tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = 256.0 tmp18 = tmp16 / tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp18, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [binary_cross_entropy], Original ATen: [aten.binary_cross_entropy] stream0 = get_raw_stream(0) triton_per_fused_binary_cross_entropy_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
import torch.nn.functional as F
import torch.nn as nn


class BCELoss2d(nn.Module):

    def __init__(self, weight=None, size_average=True):
        super(BCELoss2d, self).__init__()
        self.bce_loss = nn.BCELoss(weight, size_average)

    def forward(self, logits, targets):
        probs = F.sigmoid(logits)
        probs_flat = probs.view(-1)
        targets_flat = targets.view(-1)
        return self.bce_loss(probs_flat, targets_flat)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]


triton_code:
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = -tmp4 tmp6 = libdevice.log1p(tmp5) tmp7 = -100.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp2 * tmp8 tmp10 = tl_math.log(tmp4) tmp11 = triton_helpers.maximum(tmp10, tmp7) tmp12 = tmp0 * tmp11 tmp13 = tmp9 - tmp12 tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = 256.0 tmp18 = tmp16 / tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class BCELoss2dNew(nn.Module): def __init__(self, weight=None, size_average=True): super(BCELoss2dNew, self).__init__() self.bce_loss = nn.BCELoss(weight, size_average) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
repo_name: cmarasinou/carvana-challenge
module_name: BCELoss2d
synthetic: false
uuid: 15,049
licenses: [ "MIT" ]
stars: 93
sha: 4e1c43f306cfbef1df267acfce59bdcf19504850
repo_link: https://github.com/cmarasinou/carvana-challenge/tree/4e1c43f306cfbef1df267acfce59bdcf19504850
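The single persistent-reduction kernel folds the sigmoid into the loss and clamps both log terms at -100, which matches ATen's binary_cross_entropy behavior. An eager reference for the same scalar, as a sketch:

    import torch
    import torch.nn.functional as F

    logits = torch.rand(4, 4, 4, 4, device='cuda')
    targets = torch.rand(4, 4, 4, 4, device='cuda')

    # mean over all 256 elements, exactly the tmp16 / 256.0 step in the kernel
    expected = F.binary_cross_entropy(torch.sigmoid(logits).view(-1), targets.view(-1))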
entry_point: mlpblock

original_triton_code:
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/nu/cnuuaznpt4szfn74bn46qfjkdypvlkfa5x44ywjpperdjt2a66rj.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 
10 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (10, 4), (4, 1)) assert_size_stride(primals_2, (10, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 10), (10, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 10), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 10), (160, 40, 10, 1), 0); del buf0 # reuse buf3 = empty_strided_cuda((4, 4, 4, 10), (160, 40, 10, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 640, grid=grid(640), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 10), (10, 1), 0), reinterpret_tensor(primals_4, (10, 4), (1, 10), 0), alpha=1, beta=1, out=buf2) del primals_5 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 10), (10, 1), 0), primals_4, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((10, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 10), (10, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class linearblock(nn.Module): def __init__(self, in_features, out_features, bias=True, dropout='none'): super(linearblock, self).__init__() self.conv = nn.Linear(in_features, out_features, bias=bias) self.relu = nn.ReLU(inplace=True) self.dropout = nn.Dropout(inplace=True) self.dropoutoption = dropout def forward(self, x): x = self.conv(x) x = self.relu(x) if self.dropoutoption == 'normal': x = self.dropout(x) return x class mlpblock(nn.Module): def __init__(self, inputdim, outputdim, nlayer=2, hiddendim=10, activation='ReLU'): super(mlpblock, self).__init__() self.nlayer = nlayer self.hiddendim = hiddendim self.inputdim = inputdim self.outputdim = outputdim if activation == 'ReLU': self.act = F.relu else: raise NotImplementedError self.fc1 = nn.Linear(self.inputdim, self.hiddendim) fc_iter = [] for n in range(self.nlayer - 2): fc_iter.append(linearblock(self.hiddendim, self.hiddendim)) self.fc_iter = nn.Sequential(*fc_iter) self.fc_final = nn.Linear(self.hiddendim, self.outputdim) def forward(self, x): x = self.act(self.fc1(x)) x = self.fc_iter(x) x = self.fc_final(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inputdim': 4, 'outputdim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 640 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 10 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (10, 4), (4, 1)) assert_size_stride(primals_2, (10,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 10), (10, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 10), (10, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 10), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 10), (160, 40, 10, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 10), (160, 40, 10, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(640)](buf1, primals_2, buf3, 640, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 10), (10, 1), 0), reinterpret_tensor(primals_4, (10, 4), (1, 10), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 10), (10, 1), 0), primals_4, buf3 class linearblock(nn.Module): def __init__(self, in_features, out_features, bias=True, dropout='none'): super(linearblock, self).__init__() self.conv = nn.Linear(in_features, out_features, bias=bias) self.relu = nn.ReLU(inplace=True) self.dropout = nn.Dropout(inplace=True) self.dropoutoption = dropout def forward(self, x): x = self.conv(x) x = self.relu(x) if self.dropoutoption == 'normal': x = self.dropout(x) return x class mlpblockNew(nn.Module): def __init__(self, inputdim, outputdim, nlayer=2, hiddendim=10, activation='ReLU'): super(mlpblockNew, self).__init__() self.nlayer = nlayer self.hiddendim = hiddendim self.inputdim = inputdim self.outputdim = outputdim if activation == 'ReLU': self.act = F.relu else: raise NotImplementedError self.fc1 = nn.Linear(self.inputdim, self.hiddendim) fc_iter = [] for n in range(self.nlayer - 2): fc_iter.append(linearblock(self.hiddendim, self.hiddendim)) self.fc_iter = nn.Sequential(*fc_iter) self.fc_final = nn.Linear(self.hiddendim, self.outputdim) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc_final.weight primals_5 = 
self.fc_final.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
coallaoh/WhitenBlackBox
mlpblock
false
15050
[ "MIT" ]
46
816363c59a11248e79ffed70f1a14510b0967dab
https://github.com/coallaoh/WhitenBlackBox/tree/816363c59a11248e79ffed70f1a14510b0967dab
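A quick sanity check for a record like the one above is to run the eager module and the Inductor-generated wrapper side by side on identical weights. This is a minimal sketch, not part of the record: it assumes the mlpblock and mlpblockNew classes from the fields above are in scope and that a CUDA device is available (the generated call path is CUDA-only).

import torch

torch.manual_seed(0)
ref = mlpblock(inputdim=4, outputdim=4).cuda()      # eager path
opt = mlpblockNew(inputdim=4, outputdim=4).cuda()   # Triton-backed path
opt.load_state_dict(ref.state_dict())               # same weights -> same function

x = torch.rand(4, 4, 4, 4, device='cuda')           # matches get_inputs()
with torch.no_grad():
    y_ref = ref(x)
    y_opt = opt(x)
# The fused kernel folds the fc1 bias add and the ReLU into a single pass;
# results should agree with eager execution to float32 tolerance.
assert torch.allclose(y_ref, y_opt, atol=1e-5)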
ShiftedSoftplus
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/gs/cgst5bn3xbeqcn2yvtb23cshsvuia6kpd4lotl6ljsad6vyq6bqx.py # Topologically Sorted Source Nodes: [sub, softplus], Original ATen: [aten.sub, aten.softplus] # Source node to ATen node mapping: # softplus => exp, gt, log1p, where # sub => sub # Graph fragment: # %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sub, 20), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %sub, %log1p), kwargs = {}) triton_poi_fused_softplus_sub_0 = async_compile.triton('triton_poi_fused_softplus_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_softplus_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_softplus_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp3 = 20.0 tmp4 = tmp2 > tmp3 tmp5 = tl_math.exp(tmp2) tmp6 = libdevice.log1p(tmp5) tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, softplus], Original ATen: [aten.sub, aten.softplus] stream0 = get_raw_stream(0) triton_poi_fused_softplus_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F from torch import nn class ShiftedSoftplus(nn.Module): __constants__ = ['beta', 'threshold'] beta: 'int' threshold: 'int' def __init__(self, beta: 'int'=1, threshold: 'int'=20) ->None: super(ShiftedSoftplus, self).__init__() self.beta = beta self.threshold = threshold def forward(self, x: 'torch.Tensor') ->torch.Tensor: return F.softplus(x - 1, self.beta, self.threshold) def extra_repr(self) ->str: return 'beta={}, threshold={}'.format(self.beta, self.threshold) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_softplus_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp3 = 20.0 tmp4 = tmp2 > tmp3 tmp5 = tl_math.exp(tmp2) tmp6 = libdevice.log1p(tmp5) tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_softplus_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class ShiftedSoftplusNew(nn.Module): __constants__ = ['beta', 'threshold'] beta: 'int' threshold: 'int' def __init__(self, beta: 'int'=1, threshold: 'int'=20) ->None: super(ShiftedSoftplusNew, self).__init__() self.beta = beta self.threshold = threshold def extra_repr(self) ->str: return 'beta={}, threshold={}'.format(self.beta, self.threshold) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
cmusatyalab/mega-nerf
ShiftedSoftplus
false
15051
[ "MIT" ]
107
306e06cc316dd4f5c84d0610308bcbc208228fc3
https://github.com/cmusatyalab/mega-nerf/tree/306e06cc316dd4f5c84d0610308bcbc208228fc3
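The fused kernel in this record is just the numerically stable softplus expansion applied to x - 1: compute z = x - 1, and where z exceeds the threshold return z itself (so exp(z) cannot overflow), otherwise return log1p(exp(z)). A hedged eager restatement of the same arithmetic (the helper name shifted_softplus_ref is illustrative, not from the record; beta=1 and threshold=20 match the module defaults):

import torch
import torch.nn.functional as F

def shifted_softplus_ref(x, threshold=20.0):
    # Mirrors triton_poi_fused_softplus_sub_0: sub, gt, exp, log1p, where.
    z = x - 1.0
    return torch.where(z > threshold, z, torch.log1p(torch.exp(z)))

x = torch.rand(4, 4, 4, 4)
assert torch.allclose(shifted_softplus_ref(x),
                      F.softplus(x - 1, beta=1, threshold=20))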
Accuracy
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/oh/cohmmq5l7ydiqz6movlhta7j6kdysj3oy6x2xja3srt34mii3od4.py # Topologically Sorted Source Nodes: [prediction], Original ATen: [aten.argmax] # Source node to ATen node mapping: # prediction => argmax # Graph fragment: # %argmax : [num_users=1] = call_function[target=torch.ops.aten.argmax.default](args = (%arg0_1, 1), kwargs = {}) triton_poi_fused_argmax_0 = async_compile.triton('triton_poi_fused_argmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_argmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_argmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp17 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp32 = tl.load(in_ptr0 + (48 + 
x0 + (64*x1)), xmask) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tmp45 = tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tl.store(out_ptr0 + (x2), tmp46, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/po/cpould7ijejacl55l5cbia54ogu2rrhx6n2e2y34xgiu65udhskl.py # Topologically Sorted Source Nodes: [correct, float_1, accuracy], Original ATen: [aten.eq, aten._to_copy, aten.mean] # Source node to ATen node mapping: # accuracy => mean # correct => eq # float_1 => convert_element_type # Graph fragment: # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%argmax, %arg1_1), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%convert_element_type,), kwargs = {}) triton_per_fused__to_copy_eq_mean_1 = async_compile.triton('triton_per_fused__to_copy_eq_mean_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_eq_mean_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__to_copy_eq_mean_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel 
= 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex % 64 r2 = rindex tmp0 = tl.load(in_ptr0 + (r0), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (r2), None) tmp1 = tmp0.to(tl.float32) tmp3 = tmp1 == tmp2 tmp4 = tmp3.to(tl.float32) tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = 256.0 tmp9 = tmp7 / tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) # Topologically Sorted Source Nodes: [prediction], Original ATen: [aten.argmax] stream0 = get_raw_stream(0) triton_poi_fused_argmax_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [correct, float_1, accuracy], Original ATen: [aten.eq, aten._to_copy, aten.mean] triton_per_fused__to_copy_eq_mean_1.run(buf2, buf0, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg1_1 del buf0 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class Accuracy(nn.Module): label = 'Accuracy' def forward(self, prediction, truth): prediction = prediction.argmax(dim=1) correct = prediction == truth accuracy = correct.float().mean() return accuracy def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_argmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp17 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp32 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tl.store(out_ptr0 + x2, tmp46, xmask) @triton.jit def triton_per_fused__to_copy_eq_mean_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex % 64 r2 = rindex tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + r2, None) tmp1 = tmp0.to(tl.float32) tmp3 = tmp1 == tmp2 tmp4 = tmp3.to(tl.float32) tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = 256.0 tmp9 = tmp7 / tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) get_raw_stream(0) triton_poi_fused_argmax_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__to_copy_eq_mean_1[grid(1)](buf2, buf0, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg1_1 del buf0 return buf2, class AccuracyNew(nn.Module): label = 'Accuracy' def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
cms-flash/beauty-net
Accuracy
false
15052
[ "MIT" ]
155
668210a95ccb4462d7beff10505e4e83532682f2
https://github.com/cms-flash/beauty-net/tree/668210a95ccb4462d7beff10505e4e83532682f2
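Two details of this record are easy to miss. First, triton_poi_fused_argmax_0 unrolls the argmax over the four entries of dim 1 as a chain of compare/select steps; the tmp != tmp comparisons reproduce torch.argmax's NaN handling (NaN compares unequal to itself, so a NaN entry wins the tournament). Second, the eager module compares a (4, 4, 4) index tensor against a (4, 4, 4, 4) truth tensor, which broadcasts the indices across truth's leading dimension; that is why the reduction kernel reads in_ptr0 at rindex % 64 while reading in_ptr1 at rindex. A minimal eager sketch of that broadcast, assuming the shapes from get_inputs():

import torch

logits = torch.rand(4, 4, 4, 4)  # scores, classes on dim 1
truth = torch.rand(4, 4, 4, 4)   # float 'labels', as in get_inputs()

idx = logits.argmax(dim=1)       # (4, 4, 4) int64 -- what the first kernel emits
# (4, 4, 4) == (4, 4, 4, 4) broadcasts idx over truth's leading dim, giving
# a (4, 4, 4, 4) boolean mask that the second kernel averages to a scalar.
acc = (idx == truth).float().mean()
print(acc.item())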
ConvBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/zv/czvfpj3ah2lefbwpcuw4esv23bxs5a3ab63ply3ntgbsdktepd5v.py # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # relu => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 18816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 784) % 6 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/v7/cv7qi7gg3bpfwb3hj7zgy5jlgh7x7wdgqsfsodkjsoverxdjlf6z.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x3 = (xindex // 14) x2 = (xindex // 1176) x4 = xindex % 1176 tmp0 = tl.load(in_ptr0 + ((2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (28 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + (1184*x2)), tmp6, xmask) 
tl.store(out_ptr1 + (x4 + (1280*x2)), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xe/cxelxvpw3asckozc53rh36773aohp5hqpbp2nos5ymcdqhxvo4bl.py # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # relu_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 100) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/tn/ctnw4tbgfy47ppke77vu7rtiz7dl5o3ahickx4p64n7c5rmrrix6.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = (xindex // 5) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (10 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2), tmp15, xmask) tl.store(out_ptr1 + (x2), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (6, ), (1, )) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_5, (16, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 18816, grid=grid(18816), stream=stream0) 
del primals_2 buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch.float32) buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch.int8) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 4704, grid=grid(4704), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 6400, grid=grid(6400), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 1600, grid=grid(1600), stream=stream0) return (reinterpret_tensor(buf7, (4, 400), (400, 1), 0), primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((6, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 32, 32), (3072, 1024, 32, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class ConvBlock(nn.Module): def __init__(self): super(ConvBlock, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) return x def get_inputs(): return [torch.rand([4, 3, 32, 32])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 18816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 784 % 6 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x3 = xindex // 14 x2 = xindex // 1176 x4 = xindex % 1176 tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask) tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 100 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, 
tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (6,), (1,)) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_5, (16,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_2, 18816, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch .float32) buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2, buf3, 4704, XBLOCK=256, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5, 6400, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32 ) triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6, buf7, 1600, XBLOCK=128, num_warps=4, num_stages=1) return reinterpret_tensor(buf7, (4, 400), (400, 1), 0 ), primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6 class ConvBlockNew(nn.Module): def __init__(self): super(ConvBlockNew, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
coasxu/FedMA
ConvBlock
false
15053
[ "MIT" ]
254
21f4d32338fd2563ebd97c737e3b9f4f470029d9
https://github.com/coasxu/FedMA/tree/21f4d32338fd2563ebd97c737e3b9f4f470029d9
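The buffer shapes in call follow directly from the LeNet-style geometry: a 5x5 valid convolution takes 32 -> 28, 2x2 pooling halves that to 14, the second convolution gives 10, pooling gives 5, and 16 * 5 * 5 = 400 matches the final view. (Note also that Inductor pads some intermediate strides, e.g. buf2's batch stride is 1184 rather than the contiguous 1176, presumably for alignment.) A minimal shape check using the eager ConvBlock from this record, runnable on CPU:

import torch

m = ConvBlock()                  # eager module defined above
x = torch.rand(4, 3, 32, 32)     # matches get_inputs()
y = m(x)
# conv1 (5x5, valid): 32 -> 28; pool: -> 14
# conv2 (5x5, valid): 14 -> 10; pool: -> 5
# flatten: 16 * 5 * 5 = 400
assert y.shape == (4, 400)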
HirarchicalAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py # Topologically Sorted Source Nodes: [u_it], Original ATen: [aten.tanh] # Source node to ATen node mapping: # u_it => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/dm/cdmkcxuzpnailvibeivaikqdr4zvashgzwju7qijhq5aizlo3aor.py # Topologically Sorted Source Nodes: [a_it], Original ATen: [aten._softmax] # Source node to ATen node mapping: # a_it => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kt/cktghousutx6xui2sl2rvevzmb7gkacvfhntjq5n2xzeu7v57oz6.py # Topologically Sorted Source Nodes: [a_it], Original ATen: [aten._softmax] # Source node to ATen node mapping: # a_it => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/x5/cx5qa4dyw6ydnehpkcpbpdbuv5qdkuwkqx5hgsacgljy6mj5hebt.py # Topologically Sorted Source Nodes: [a_it, mul, s_i], Original ATen: [aten._softmax, aten.mul, aten.sum] # Source node to ATen node mapping: # a_it => div, sum_1 # mul => mul # s_i => sum_2 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %div), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused__softmax_mul_sum_3 = async_compile.triton('triton_poi_fused__softmax_mul_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 16) x3 = xindex % 16 x1 = (xindex // 4) % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x3 + (64*x2)), xmask) tmp1 = tl.load(in_ptr1 + (x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x3 + (64*x2)), xmask) tmp4 = tl.load(in_ptr1 + (4 + x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (32 + x3 + (64*x2)), xmask) tmp8 = tl.load(in_ptr1 + (8 + x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x3 + (64*x2)), xmask) tmp12 = tl.load(in_ptr1 + (12 + x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + (x4), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [u_it], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [a_it], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [a_it], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 
0); del buf3 # reuse # Topologically Sorted Source Nodes: [a_it, mul, s_i], Original ATen: [aten._softmax, aten.mul, aten.sum] triton_poi_fused__softmax_mul_sum_3.run(primals_3, buf4, buf5, 64, grid=grid(64), stream=stream0) del buf4 return (buf5, primals_3, buf1, buf2, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
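The two _softmax kernels above are Inductor's standard numerically stable split: a first pass subtracts the per-row max and exponentiates, a second pass normalizes by the row sum. A minimal eager sketch of the same two-pass decomposition (the shape is an assumption; the compiled kernels are specialized to the (4, 4, 4, 1) attention logits):

import torch

def softmax_two_pass(x: torch.Tensor, dim: int) -> torch.Tensor:
    # Pass 1 (cf. triton_poi_fused__softmax_1): subtract the row max, exponentiate.
    e = torch.exp(x - x.amax(dim=dim, keepdim=True))
    # Pass 2 (cf. triton_poi_fused__softmax_2): normalize by the row sum.
    return e / e.sum(dim=dim, keepdim=True)

x = torch.randn(4, 4, 4, 1)
assert torch.allclose(softmax_two_pass(x, dim=1), torch.softmax(x, dim=1))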
from torch.nn import Module import torch from typing import * import torch.utils.data import torch.nn as nn import torch.onnx.operators import torch.optim class HirarchicalAttention(Module): """ ref: Hierarchical Attention Networks for Document Classification """ def __init__(self, hidden_size: 'int'): super(HirarchicalAttention, self).__init__() self.w_linear = nn.Linear(hidden_size, hidden_size) self.u_w = nn.Linear(hidden_size, 1, bias=False) def forward(self, input: 'torch.Tensor') ->torch.Tensor: u_it = torch.tanh(self.w_linear(input)) a_it = torch.softmax(self.u_w(u_it), dim=1) s_i = (input * a_it).sum(dim=1) return s_i def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
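A quick smoke test of the eager module above, mirroring get_inputs() and get_init_inputs(); the softmax attention weights live on dim=1, and the weighted sum removes that dimension:

import torch

attn = HirarchicalAttention(hidden_size=4)
x = torch.rand(4, 4, 4, 4)   # same shape as get_inputs()
s_i = attn(x)
print(s_i.shape)             # torch.Size([4, 4, 4]); dim=1 has been reduced away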
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch.nn import Module from typing import * import torch.utils.data import torch.nn as nn import torch.onnx.operators import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex % 16 x1 = xindex // 4 % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask) tmp1 = tl.load(in_ptr1 + (x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask) tmp4 = tl.load(in_ptr1 + (4 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask) tmp8 = tl.load(in_ptr1 + (8 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask) tmp12 = tl.load(in_ptr1 + (12 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 
= tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x4, tmp14, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_2[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0) del buf3 triton_poi_fused__softmax_mul_sum_3[grid(64)](primals_3, buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf4 return buf5, primals_3, buf1, buf2, primals_4 class HirarchicalAttentionNew(Module): """ ref: Hierarchical Attention Networks for Document Classification """ def __init__(self, hidden_size: 'int'): super(HirarchicalAttentionNew, self).__init__() self.w_linear = nn.Linear(hidden_size, hidden_size) self.u_w = nn.Linear(hidden_size, 1, bias=False) def forward(self, input_0): primals_1 = self.w_linear.weight primals_2 = self.w_linear.bias primals_4 = self.u_w.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
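A hedged equivalence check for the compiled wrapper, assuming both classes are imported into one script (in this dump they live in separate fields) and the parameters are synchronized; the generated call() hard-codes CUDA device 0 and a (4, 4, 4, 4) input:

import torch

torch.manual_seed(0)
eager = HirarchicalAttention(4).cuda()
compiled = HirarchicalAttentionNew(4).cuda()
compiled.load_state_dict(eager.state_dict())   # both expose w_linear and u_w
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), compiled(x), atol=1e-5)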
code-backdoor/code-backdoor
HirarchicalAttention
false
15054
[ "MIT" ]
71
1eeb3d79aa8a54c8f08e8d0156b569de5edd974e
https://github.com/code-backdoor/code-backdoor/tree/1eeb3d79aa8a54c8f08e8d0156b569de5edd974e
ConvLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/um/cum65j23qchrjf5dndblqgbw6zomhgwfj2obfidtgy7b5j3zwklm.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%primals_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel 
x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/wk/cwk2wao7opapqbjj7klnqrd6tgist3ts3nc5veryzhzstwpx7d4l.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7c/c7cntq6q7d55mwfa7fa3fpnxvxrol7akfutqaf26t2swga7xx3mx.py # Topologically Sorted Source Nodes: [softmax, mul, edges], Original ATen: [aten._softmax, aten.mul, aten.sum] # Source node to ATen node mapping: # edges => sum_2 # mul => mul 
# softmax => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused__softmax_mul_sum_2 = async_compile.triton('triton_poi_fused__softmax_mul_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp8 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp12 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + (x2), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) buf1 = 
empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf0, buf1, 16, grid=grid(16), stream=stream0) del buf0 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax, mul, edges], Original ATen: [aten._softmax, aten.mul, aten.sum] triton_poi_fused__softmax_mul_sum_2.run(primals_2, buf1, buf2, 64, grid=grid(64), stream=stream0) del buf1 return (buf2, primals_1, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
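triton_poi_fused__softmax_mul_sum_2 above fuses the multiply and the channel reduction, fully unrolled over the four input channels. An eager sketch of the same computation (shapes taken from the assert_size_stride calls: weight (4, 4, 1, 1), edges (4, 4, 4, 4)):

import torch

w = torch.softmax(torch.rand(4, 4, 1, 1), dim=1)
edges = torch.rand(4, 4, 4, 4)
fused = (edges * w).sum(dim=1)                           # (4, 4, 4), matching buf2
unrolled = sum(edges[:, c] * w[:, c] for c in range(4))  # what the unrolled kernel computes
assert torch.allclose(fused, unrolled, atol=1e-6)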
import torch import torch.nn.functional as F from typing import * import torch.utils.data import torch.nn as nn import torch.onnx.operators import torch.optim class ConvLayer(nn.Module): def __init__(self, in_channels, out_channels): super(ConvLayer, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels, 1, 1)) nn.init.constant_(self.weight, 0.1) def forward(self, edges): edges = (edges * F.softmax(self.weight, dim=1)).sum(dim=1) return edges def extra_repr(self) ->str: return 'ConV {}'.format(self.weight.size()) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
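One consequence of the constant 0.1 initialization above: softmax over four identical logits is exactly uniform (0.25 each), so a freshly constructed layer reduces to a plain mean over the channel dimension. A small sanity check (hypothetical):

import torch

layer = ConvLayer(in_channels=4, out_channels=4)
x = torch.rand(4, 4, 4, 4)
assert torch.allclose(layer(x), x.mean(dim=1), atol=1e-6)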
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from typing import * import torch.utils.data import torch.nn as nn import torch.onnx.operators import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp8 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp12 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(16)](primals_1, buf0, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del 
buf0 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_mul_sum_2[grid(64)](primals_2, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf1 return buf2, primals_1, primals_2 class ConvLayerNew(nn.Module): def __init__(self, in_channels, out_channels): super(ConvLayerNew, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels, 1, 1)) nn.init.constant_(self.weight, 0.1) def extra_repr(self) ->str: return 'ConV {}'.format(self.weight.size()) def forward(self, input_0): primals_1 = self.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
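Hedged usage sketch for the compiled wrapper: it is a drop-in replacement on CUDA, but the generated call() asserts pin the input to shape (4, 4, 4, 4):

import torch

layer = ConvLayerNew(4, 4).cuda()
out = layer(torch.rand(4, 4, 4, 4, device='cuda'))
print(out.shape)   # torch.Size([4, 4, 4])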
code-backdoor/code-backdoor
ConvLayer
false
15055
[ "MIT" ]
71
1eeb3d79aa8a54c8f08e8d0156b569de5edd974e
https://github.com/code-backdoor/code-backdoor/tree/1eeb3d79aa8a54c8f08e8d0156b569de5edd974e
SimpleCNNContainerConvBlocks
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/en/cenjzvk5pb3x6qk2rps77hc2bodher7u5w67jenliie3w6lsgggh.py # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # relu => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 59536 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3721) % 4 x0 = xindex % 3721 x4 = (xindex // 3721) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + (3744*x4)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/l6/cl6ctkf3gytmouomyg26l2ubfb52mh5rrkh3xay4bskd2tsjr2ak.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 14400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x1 = (xindex // 30) % 30 x4 = (xindex // 900) x3 = (xindex // 3600) x5 = xindex % 3600 tmp0 = tl.load(in_ptr0 + ((2*x0) + (122*x1) + (3744*x4)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (122*x1) + (3744*x4)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (61 + (2*x0) + (122*x1) + (3744*x4)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (62 + (2*x0) + (122*x1) + (3744*x4)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = 
tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x5 + (3616*x3)), tmp6, xmask) tl.store(out_ptr1 + (x5 + (3712*x3)), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/t6/ct6kjnwahqmikv3vgwn2lh4jo3sw3ek3p257lslvbjt5uoztxuea.py # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # relu_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 11664 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 729) % 4 x2 = (xindex // 2916) x4 = xindex % 2916 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x4 + (2944*x2)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kp/ckps5g44y5vntyobn7qn6thh65rztqsfcn4mjdtmp2w3y5sbowx3.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = 
async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 2704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 13 x1 = (xindex // 13) % 13 x2 = (xindex // 169) % 4 x3 = (xindex // 676) x4 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (54*x1) + (729*x2) + (2944*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (54*x1) + (729*x2) + (2944*x3)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (27 + (2*x0) + (54*x1) + (729*x2) + (2944*x3)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (28 + (2*x0) + (54*x1) + (729*x2) + (2944*x3)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4), tmp6, xmask) tl.store(out_ptr1 + (x4), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 61, 61), (14884, 3721, 
61, 1)) buf1 = empty_strided_cuda((4, 4, 61, 61), (14976, 3744, 61, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 59536, grid=grid(59536), stream=stream0) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 4, 30, 30), (3616, 900, 30, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 30, 30), (3712, 900, 30, 1), torch.int8) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 14400, grid=grid(14400), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 27, 27), (2916, 729, 27, 1)) buf5 = empty_strided_cuda((4, 4, 27, 27), (2944, 729, 27, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf4, primals_5, buf5, 11664, grid=grid(11664), stream=stream0) del buf4 del primals_5 buf6 = empty_strided_cuda((4, 4, 13, 13), (676, 169, 13, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 13, 13), (676, 169, 13, 1), torch.int8) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 2704, grid=grid(2704), stream=stream0) return (buf6, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
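The buffer shapes asserted above follow from valid convolution (H_out = H - k + 1 with kernel_size=4, stride 1, no padding) and from MaxPool2d(2, 2) flooring odd extents. A tiny sketch of the bookkeeping:

def conv_out(h, k=4):
    return h - k + 1   # valid convolution, stride 1, no padding

def pool_out(h):
    return h // 2      # MaxPool2d(2, 2) floors odd extents

h = pool_out(conv_out(64))   # 64 -> 61 -> 30
h = pool_out(conv_out(h))    # 30 -> 27 -> 13
print(h)                     # 13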
import torch import torch.nn.functional as F import torch.nn as nn class SimpleCNNContainerConvBlocks(nn.Module): def __init__(self, input_channel, num_filters, kernel_size, output_dim=10): super(SimpleCNNContainerConvBlocks, self).__init__() """ A testing cnn container, which allows initializing a CNN with given dims We use this one to estimate matched output of conv blocks num_filters (list) :: number of convolution filters hidden_dims (list) :: number of neurons in hidden layers Assumptions: i) we use only two conv layers and three hidden layers (including the output layer) ii) kernel size in the two conv layers are identical """ self.conv1 = nn.Conv2d(input_channel, num_filters[0], kernel_size) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(num_filters[0], num_filters[1], kernel_size) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) return x def get_inputs(): return [torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {'input_channel': 4, 'num_filters': [4, 4], 'kernel_size': 4}]
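A module-level smoke test of the eager container above, mirroring get_inputs() and get_init_inputs():

import torch

net = SimpleCNNContainerConvBlocks(input_channel=4, num_filters=[4, 4], kernel_size=4)
y = net(torch.rand(4, 4, 64, 64))
print(y.shape)   # torch.Size([4, 4, 13, 13]), matching buf6 in the compiled call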
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 59536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3721 % 4 x0 = xindex % 3721 x4 = xindex // 3721 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3744 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 14400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x1 = xindex // 30 % 30 x4 = xindex // 900 x3 = xindex // 3600 x5 = xindex % 3600 tmp0 = tl.load(in_ptr0 + (2 * x0 + 122 * x1 + 3744 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 122 * x1 + 3744 * x4), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (61 + 2 * x0 + 122 * x1 + 3744 * x4), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (62 + 2 * x0 + 122 * x1 + 3744 * x4), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x5 + 3616 * x3), tmp6, xmask) tl.store(out_ptr1 + (x5 + 3712 * x3), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 11664 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 729 % 4 x2 = xindex // 2916 x4 = xindex % 2916 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x4 + 2944 * x2), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 2704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 13 x1 = xindex // 13 % 13 x2 = xindex // 169 % 4 x3 = xindex // 676 x4 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 54 * x1 + 729 * x2 + 2944 * x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 54 * x1 + 729 * x2 + 2944 * x3), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (27 + 2 * x0 + 54 * x1 + 729 * x2 + 2944 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (28 + 2 * x0 + 54 * x1 + 729 * x2 + 2944 * x3), xmask, 
eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x4, tmp6, xmask) tl.store(out_ptr1 + x4, tmp16, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 61, 61), (14884, 3721, 61, 1)) buf1 = empty_strided_cuda((4, 4, 61, 61), (14976, 3744, 61, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(59536)](buf0, primals_2, buf1, 59536, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 4, 30, 30), (3616, 900, 30, 1), torch .float32) buf3 = empty_strided_cuda((4, 4, 30, 30), (3712, 900, 30, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_1[grid(14400)](buf1, buf2, buf3, 14400, XBLOCK=256, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 27, 27), (2916, 729, 27, 1)) buf5 = empty_strided_cuda((4, 4, 27, 27), (2944, 729, 27, 1), torch .float32) triton_poi_fused_convolution_relu_2[grid(11664)](buf4, primals_5, buf5, 11664, XBLOCK=256, num_warps=4, num_stages=1) del buf4 del primals_5 buf6 = empty_strided_cuda((4, 4, 13, 13), (676, 169, 13, 1), torch. float32) buf7 = empty_strided_cuda((4, 4, 13, 13), (676, 169, 13, 1), torch.int8 ) triton_poi_fused_max_pool2d_with_indices_3[grid(2704)](buf5, buf6, buf7, 2704, XBLOCK=128, num_warps=4, num_stages=1) return buf6, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf7 class SimpleCNNContainerConvBlocksNew(nn.Module): def __init__(self, input_channel, num_filters, kernel_size, output_dim=10): super(SimpleCNNContainerConvBlocksNew, self).__init__() """ A testing cnn container, which allows initializing a CNN with given dims We use this one to estimate matched output of conv blocks num_filters (list) :: number of convolution filters hidden_dims (list) :: number of neurons in hidden layers Assumptions: i) we use only two conv layers and three hidden layers (including the output layer) ii) kernel size in the two conv layers are identical """ self.conv1 = nn.Conv2d(input_channel, num_filters[0], kernel_size) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(num_filters[0], num_filters[1], kernel_size) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
coasxu/FedMA
SimpleCNNContainerConvBlocks
false
15056
[ "MIT" ]
254
21f4d32338fd2563ebd97c737e3b9f4f470029d9
https://github.com/coasxu/FedMA/tree/21f4d32338fd2563ebd97c737e3b9f4f470029d9
UNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/jo/cjolh7wy3losq75bea7heuxra52smjn2phczl4xzt2smarbxy3nj.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # conv2d => convolution # x => gt, mul, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.1), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': 
False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 32 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x3), tmp4, None) tl.store(out_ptr1 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/o4/co4xz3bhphdn2kq3lke3433wpdtqt6r3irqbdr7hp46ou2slvxop.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # x_1 => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_1, [2, 2]), kwargs = {}) triton_poi_fused_avg_pool2d_1 = async_compile.triton('triton_poi_fused_avg_pool2d_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = (xindex // 32) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x2), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/b2/cb2heynzbbb2idhib26qs23x62rr3vu36ahp3tksyhjfahippc67.py # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, 
# kernel path: runs/run_shard_0/inductor_cache/b2/cb2heynzbbb2idhib26qs23x62rr3vu36ahp3tksyhjfahippc67.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
#   conv2d_2 => convolution_2
#   x_2 => gt_2, mul_2, where_2
# Graph fragment:
#   %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d, %primals_6, %primals_7, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.1), kwargs = {})
#   %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[262144],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 262144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 1024) % 64
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + (x3), tmp4, None)
    tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/xj/cxjbrfzbed7bo2iy4m5zuii5z5cssze6tfcgrk2jehpphz5b77jh.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
#   x_4 => avg_pool2d_1
# Graph fragment:
#   %avg_pool2d_1 : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_3, [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_3 = async_compile.triton('triton_poi_fused_avg_pool2d_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 65536
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 16
    x1 = (xindex // 16)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/le/cleento7jh4h7b7b25wgw4ax6qfmthojxlfqfgkaohjqgn6pqwco.py
# Topologically Sorted Source Nodes: [conv2d_4, x_5], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
#   conv2d_4 => convolution_4
#   x_5 => gt_4, mul_4, where_4
# Graph fragment:
#   %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d_1, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt_4 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {})
#   %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.1), kwargs = {})
#   %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_4, %mul_4), kwargs = {})
triton_poi_fused_convolution_leaky_relu_4 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 131072
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 256) % 128
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + (x3), tmp4, None)
    tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/s5/cs5zukgmdewmnpcrozw2m273bpclzrkypvc2xaub2gmoc5saabvv.py
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
#   x_7 => avg_pool2d_2
# Graph fragment:
#   %avg_pool2d_2 : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_5, [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_5 = async_compile.triton('triton_poi_fused_avg_pool2d_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[32768],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 32768
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 8
    x1 = (xindex // 8)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/4u/c4urfqsk2wuyrkcnwy7b2uiwmecrugesubdiuadavwqtcisyhwz4.py
# Topologically Sorted Source Nodes: [conv2d_6, x_8], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
#   conv2d_6 => convolution_6
#   x_8 => gt_6, mul_6, where_6
# Graph fragment:
#   %convolution_6 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d_2, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt_6 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_6, 0), kwargs = {})
#   %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_6, 0.1), kwargs = {})
#   %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %convolution_6, %mul_6), kwargs = {})
triton_poi_fused_convolution_leaky_relu_6 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 65536
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 64) % 256
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + (x3), tmp4, None)
    tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
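# --- Reader's note (added, not Inductor output) ---------------------------------
# The hard-coded sizes in the encoder kernels above follow one pattern: each
# avg_pool2d halves H and W (xnumel drops 4x) and the following convolution
# doubles the channel count. A quick consistency check of the constants,
# assuming a batch of 4 (inferred from 4 * 32 * 64 * 64 = 524288; the shapes
# are baked in at compile time, so this is only an illustration):
for _c, _hw, _numel in [(32, 64, 524288), (64, 32, 262144), (128, 16, 131072), (256, 8, 65536)]:
    assert 4 * _c * _hw * _hw == _numel
# ---------------------------------------------------------------------------------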
# kernel path: runs/run_shard_0/inductor_cache/3z/c3zje6b5ccaz3n4winpmxo6y4niaoldocb7ilvkg5sorj2nqvjfa.py
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
#   x_10 => avg_pool2d_3
# Graph fragment:
#   %avg_pool2d_3 : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_7, [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_7 = async_compile.triton('triton_poi_fused_avg_pool2d_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 4
    x1 = (xindex // 4)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + ((2*x0) + (16*x1)), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (8 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (9 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/oy/coyjgpdjbipe737iasihk5ensjtmspgnzblyyy7mrlypqho5vuyg.py
# Topologically Sorted Source Nodes: [conv2d_8, x_11], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
#   conv2d_8 => convolution_8
#   x_11 => gt_8, mul_8, where_8
# Graph fragment:
#   %convolution_8 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d_3, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt_8 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_8, 0), kwargs = {})
#   %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_8, 0.1), kwargs = {})
#   %where_8 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_8, %convolution_8, %mul_8), kwargs = {})
triton_poi_fused_convolution_leaky_relu_8 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[32768],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 32768
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 16) % 512
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + (x3), tmp4, None)
    tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/j6/cj6wkwoaluxhwqnux44qht6o5xye6n3bfqi54esxnpytd6m2qyjn.py
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
#   x_13 => avg_pool2d_4
# Graph fragment:
#   %avg_pool2d_4 : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_9, [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_9 = async_compile.triton('triton_poi_fused_avg_pool2d_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[8192],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 8192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 2
    x1 = (xindex // 2)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1)), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1)), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1)), None, eviction_policy='evict_last')
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/i6/ci6sgepehwucwp2knnf7ujr55xjh7bis2i3kdyon6flrsjhhdhyi.py
# Topologically Sorted Source Nodes: [conv2d_10, x_14], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
#   conv2d_10 => convolution_10
#   x_14 => gt_10, mul_10, where_10
# Graph fragment:
#   %convolution_10 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d_4, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt_10 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_10, 0), kwargs = {})
#   %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_10, 0.1), kwargs = {})
#   %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_10, %convolution_10, %mul_10), kwargs = {})
triton_poi_fused_convolution_leaky_relu_10 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[8192],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_10(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 8192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 4) % 512
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + (x3), tmp4, None)
    tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/ep/cepxp5elnxw6qvzcibzdejr6ov2i7hn664ixt6w4vzlrorsdstiq.py
# Topologically Sorted Source Nodes: [conv2d_11, x_15], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
#   conv2d_11 => convolution_11
#   x_15 => gt_11
# Graph fragment:
#   %convolution_11 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_10, %primals_24, %primals_25, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt_11 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_11, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_11 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[8192],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 8192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 4) % 512
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
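# --- Reader's note (added, not Inductor output) ---------------------------------
# Unlike the earlier conv/leaky_relu kernels, kernel 11 above stores only the
# boolean mask gt_11 and never writes the activated tensor: %where_11 is
# rematerialized inside the upsampling kernel further down from the raw
# convolution output, the bias, and this mask, trading a little recomputation
# for one less full-tensor round trip through memory. A sketch of that
# recomputation (names are illustrative):
def _leaky_relu_from_mask(conv_out, bias, mask, negative_slope=0.1):
    import torch
    x = conv_out + bias.view(1, -1, 1, 1)
    return torch.where(mask, x, negative_slope * x)  # the tl.where(...) replayed per load
# ---------------------------------------------------------------------------------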
# kernel path: runs/run_shard_0/inductor_cache/r4/cr4hpkpezpllmtrycjdjyfyalsg3igxkpp5ddup6ueansg3uhioj.py
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
#   x_16 => convert_element_type_1
# Graph fragment:
#   %convert_element_type_1 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
triton_poi_fused__to_copy_12 = async_compile.triton('triton_poi_fused__to_copy_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4],
    filename=__file__,
    triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_12(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/it/citeiab2byvsltguyuzd2s2joq6e6z355s7h7bam6hgio5s5cret.py
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
#   x_16 => add_1, clamp_max
# Graph fragment:
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 1), kwargs = {})
#   %clamp_max : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_1, 1), kwargs = {})
triton_poi_fused_add_clamp_13 = async_compile.triton('triton_poi_fused_add_clamp_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4],
    filename=__file__,
    triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_13(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tl.full([1], 1, tl.int64)
    tmp10 = tmp8 + tmp9
    tmp11 = triton_helpers.minimum(tmp10, tmp9)
    tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tq/ctqegi24pxetbae63246pykqjalfftx6xr5vt4fhudr7ehpmpbyv.py
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
#   x_16 => add, clamp_max_2, clamp_min, clamp_min_2, convert_element_type, iota, mul_12, sub, sub_2
# Graph fragment:
#   %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
#   %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_12, 0.5), kwargs = {})
#   %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
#   %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
#   %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
#   %clamp_max_2 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 - tmp9
    tmp11 = triton_helpers.maximum(tmp10, tmp6)
    tmp12 = 1.0
    tmp13 = triton_helpers.minimum(tmp11, tmp12)
    tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
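# --- Reader's note (added, not Inductor output) ---------------------------------
# Kernels 12-14 precompute the index/weight vectors for a 2x bilinear upsample
# with align_corners=False: the half-pixel source coordinate, its floor, the
# clamped floor+1, and the fractional lerp weight. Equivalent PyTorch for the
# 4-wide output interpolated from a 2-wide input (the 0.5 factor in the kernels
# is in_size / out_size); names are illustrative:
def _bilinear_source_coords(out_size=4, in_size=2):
    import torch
    i = torch.arange(out_size, dtype=torch.float32)
    src = ((i + 0.5) * (in_size / out_size) - 0.5).clamp(min=0.0)  # shared prefix of 12-14
    lo = src.to(torch.int64)                                       # kernel 12: floor index
    hi = (lo + 1).clamp(max=in_size - 1)                           # kernel 13: next index, clamped
    frac = (src - lo.to(torch.float32)).clamp(0.0, 1.0)            # kernel 14: lerp weight
    return lo, hi, frac
# ---------------------------------------------------------------------------------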
# kernel path: runs/run_shard_0/inductor_cache/ar/carrwnezxoitom5qyzrwvxxe2xsarer32dfybfdk3w4gttj5i277.py
# Topologically Sorted Source Nodes: [conv2d_11, x_15, x_16], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
#   conv2d_11 => convolution_11
#   x_15 => mul_11, where_11
#   x_16 => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_4, add_5, add_6, mul_14, mul_15, mul_16, sub_3, sub_4, sub_6
# Graph fragment:
#   %convolution_11 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_10, %primals_24, %primals_25, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_11, 0.1), kwargs = {})
#   %where_11 : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt_11, %convolution_11, %mul_11), kwargs = {})
#   %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_11, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
#   %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_11, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
#   %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_11, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
#   %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_11, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
#   %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
#   %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {})
#   %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_14), kwargs = {})
#   %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
#   %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {})
#   %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_15), kwargs = {})
#   %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {})
#   %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
#   %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_16), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[32768],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i1', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
    xnumel = 32768
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x1 = (xindex // 4) % 4
    x0 = xindex % 4
    x6 = (xindex // 16)
    x2 = (xindex // 16) % 512
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last')
    tmp35 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last')
    tmp47 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 2, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = tl.load(in_ptr2 + (tmp8 + (2*tmp4) + (4*x6)), None, eviction_policy='evict_last').to(tl.int1)
    tmp10 = tl.load(in_ptr3 + (tmp8 + (2*tmp4) + (4*x6)), None, eviction_policy='evict_last')
    tmp12 = tmp10 + tmp11
    tmp13 = 0.1
    tmp14 = tmp12 * tmp13
    tmp15 = tl.where(tmp9, tmp12, tmp14)
    tmp17 = tmp16 + tmp1
    tmp18 = tmp16 < 0
    tmp19 = tl.where(tmp18, tmp17, tmp16)
    tmp20 = tl.load(in_ptr2 + (tmp8 + (2*tmp19) + (4*x6)), None, eviction_policy='evict_last').to(tl.int1)
    tmp21 = tl.load(in_ptr3 + (tmp8 + (2*tmp19) + (4*x6)), None, eviction_policy='evict_last')
    tmp22 = tmp21 + tmp11
    tmp23 = tmp22 * tmp13
    tmp24 = tl.where(tmp20, tmp22, tmp23)
    tmp26 = tmp25 + tmp1
    tmp27 = tmp25 < 0
    tmp28 = tl.where(tmp27, tmp26, tmp25)
    tmp29 = tl.load(in_ptr2 + (tmp28 + (2*tmp19) + (4*x6)), None, eviction_policy='evict_last').to(tl.int1)
    tmp30 = tl.load(in_ptr3 + (tmp28 + (2*tmp19) + (4*x6)), None, eviction_policy='evict_last')
    tmp31 = tmp30 + tmp11
    tmp32 = tmp31 * tmp13
    tmp33 = tl.where(tmp29, tmp31, tmp32)
    tmp34 = tmp33 - tmp24
    tmp36 = tmp34 * tmp35
    tmp37 = tmp24 + tmp36
    tmp38 = tl.load(in_ptr2 + (tmp28 + (2*tmp4) + (4*x6)), None, eviction_policy='evict_last').to(tl.int1)
    tmp39 = tl.load(in_ptr3 + (tmp28 + (2*tmp4) + (4*x6)), None, eviction_policy='evict_last')
    tmp40 = tmp39 + tmp11
    tmp41 = tmp40 * tmp13
    tmp42 = tl.where(tmp38, tmp40, tmp41)
    tmp43 = tmp42 - tmp15
    tmp44 = tmp43 * tmp35
    tmp45 = tmp15 + tmp44
    tmp46 = tmp45 - tmp37
    tmp48 = tmp46 * tmp47
    tmp49 = tmp37 + tmp48
    tl.store(in_out_ptr1 + (x4), tmp49, None)
''', device_str='cuda')
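# --- Reader's note (added, not Inductor output) ---------------------------------
# Kernel 15 above is the whole 2x bilinear upsample in one pass: for each output
# element it gathers the four corner values (rematerializing leaky_relu from the
# gt_11 mask on every load) and lerps along columns, then across rows, matching
# the add_4/add_5/add_6 graph nodes. A PyTorch sketch using the index/weight
# vectors from the previous three kernels (names are illustrative):
def _bilinear_upsample_2x(v, lo_r, hi_r, lo_c, hi_c, w_c, w_r):
    # v: (N, C, H_in, W_in) already-activated feature map
    c00 = v[:, :, lo_r[:, None], lo_c[None, :]]
    c01 = v[:, :, lo_r[:, None], hi_c[None, :]]
    c10 = v[:, :, hi_r[:, None], lo_c[None, :]]
    c11 = v[:, :, hi_r[:, None], hi_c[None, :]]
    top = c00 + (c01 - c00) * w_c            # column lerp (weight from kernel 14)
    bot = c10 + (c11 - c10) * w_c
    return top + (bot - top) * w_r[:, None]  # row lerp
# ---------------------------------------------------------------------------------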
# kernel path: runs/run_shard_0/inductor_cache/vm/cvmoqavquuan3erpml2tllmmlw2pfct5mokbplbbjboljuyvw7db.py
# Topologically Sorted Source Nodes: [conv2d_12, x_17], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
#   conv2d_12 => convolution_12
#   x_17 => gt_12
# Graph fragment:
#   %convolution_12 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_6, %primals_26, %primals_27, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt_12 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_12, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_16 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[32768],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_16(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 32768
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 16) % 512
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/zq/czqy62awvhrtl5r6fvvk4ufd5wffutbs7uz3a6rvpxyaj5tosmne.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   cat => cat
# Graph fragment:
#   %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_12, %where_9], 1), kwargs = {})
triton_poi_fused_cat_17 = async_compile.triton('triton_poi_fused_cat_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536],
    filename=__file__,
    triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 65536
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x1 = (xindex // 16) % 1024
    x0 = xindex % 16
    x2 = (xindex // 16384)
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 512, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (8192*x2)), tmp4, other=0.0).to(tl.int1)
    tmp6 = tl.load(in_ptr1 + (x0 + (16*x1) + (8192*x2)), tmp4, other=0.0)
    tmp7 = tl.load(in_ptr2 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
    tmp8 = tmp6 + tmp7
    tmp9 = 0.1
    tmp10 = tmp8 * tmp9
    tmp11 = tl.where(tmp5, tmp8, tmp10)
    tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
    tmp13 = tl.where(tmp4, tmp11, tmp12)
    tmp14 = tmp0 >= tmp3
    tmp15 = tl.full([1], 1024, tl.int64)
    tmp16 = tmp0 < tmp15
    tmp17 = tl.load(in_ptr3 + (x0 + (16*((-512) + x1)) + (8192*x2)), tmp14, other=0.0)
    tmp18 = tl.where(tmp4, tmp13, tmp17)
    tl.store(out_ptr0 + (x3), tmp18, None)
''', device_str='cuda')
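# --- Reader's note (added, not Inductor output) ---------------------------------
# cat_17 fuses the decoder's leaky_relu (again rebuilt from the stored gt_12
# mask) into the channel concatenation with the encoder feature map %where_9,
# i.e. a U-Net-style skip connection: channels 0-511 take the activated decoder
# branch, channels 512-1023 copy the skip tensor. Sketch (names illustrative):
def _skip_concat(mask, conv_out, bias, skip, negative_slope=0.1):
    import torch
    x = conv_out + bias.view(1, -1, 1, 1)
    act = torch.where(mask, x, negative_slope * x)  # where_12, rematerialized
    return torch.cat([act, skip], dim=1)            # 512 + 512 -> 1024 channels
# ---------------------------------------------------------------------------------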
# kernel path: runs/run_shard_0/inductor_cache/6w/c6wr4loz4rxs56x2er2z7yyokvbxpzsmffbuifvku2xqaerh75p3.py
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
#   x_19 => convert_element_type_5
# Graph fragment:
#   %convert_element_type_5 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_2, torch.int64), kwargs = {})
triton_poi_fused__to_copy_18 = async_compile.triton('triton_poi_fused__to_copy_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[8],
    filename=__file__,
    triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_18(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/oq/coq7wg2jvtpm4oc4zm4dvbkwpc5jinzscvm4jrouu3hlob5nd7rs.py
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
#   x_19 => add_8, clamp_max_4
# Graph fragment:
#   %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_5, 1), kwargs = {})
#   %clamp_max_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_8, 3), kwargs = {})
triton_poi_fused_add_clamp_19 = async_compile.triton('triton_poi_fused_add_clamp_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[8],
    filename=__file__,
    triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_19(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tl.full([1], 1, tl.int64)
    tmp10 = tmp8 + tmp9
    tmp11 = tl.full([1], 3, tl.int64)
    tmp12 = triton_helpers.minimum(tmp10, tmp11)
    tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rh/crhh5ib7z3hkisro2vncldz577bevkgu7k3u5nnclrqmgo3wnuzx.py
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
#   x_19 => add_7, clamp_max_6, clamp_min_4, clamp_min_6, convert_element_type_4, iota_2, mul_19, sub_7, sub_9
# Graph fragment:
#   %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_2, torch.float32), kwargs = {})
#   %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.5), kwargs = {})
#   %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, 0.5), kwargs = {})
#   %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_19, 0.5), kwargs = {})
#   %clamp_min_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_7, 0.0), kwargs = {})
#   %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_4, %convert_element_type_7), kwargs = {})
#   %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_9, 0.0), kwargs = {})
#   %clamp_max_6 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_6, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[8],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 - tmp9
    tmp11 = triton_helpers.maximum(tmp10, tmp6)
    tmp12 = 1.0
    tmp13 = triton_helpers.minimum(tmp11, tmp12)
    tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qk/cqkzmxtnx34qktmz7vfilm2bzsfk2r5cxo3wx6pwurql3crkpejs.py
# Topologically Sorted Source Nodes: [conv2d_13, x_18, x_19], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
#   conv2d_13 => convolution_13
#   x_18 => mul_18, where_13
#   x_19 => _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, add_11, add_12, add_13, mul_21, mul_22, mul_23, sub_10, sub_11, sub_13
# Graph fragment:
#   %convolution_13 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_28, %primals_29, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_13, 0.1), kwargs = {})
#   %where_13 : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt_13, %convolution_13, %mul_18), kwargs = {})
#   %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_13, [None, None, %convert_element_type_5, %convert_element_type_7]), kwargs = {})
#   %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_13, [None, None, %convert_element_type_5, %clamp_max_5]), kwargs = {})
#   %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_13, [None, None, %clamp_max_4, %convert_element_type_7]), kwargs = {})
#   %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_13, [None, None, %clamp_max_4, %clamp_max_5]), kwargs = {})
#   %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_5, %_unsafe_index_4), kwargs = {})
#   %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %clamp_max_6), kwargs = {})
#   %add_11 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_4, %mul_21), kwargs = {})
#   %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_7, %_unsafe_index_6), kwargs = {})
#   %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %clamp_max_6), kwargs = {})
#   %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_6, %mul_22), kwargs = {})
#   %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_12, %add_11), kwargs = {})
#   %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_13, %clamp_max_7), kwargs = {})
#   %add_13 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %mul_23), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i1', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
    xnumel = 131072
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x1 = (xindex // 8) % 8
    x0 = xindex % 8
    x6 = (xindex // 64)
    x2 = (xindex // 64) % 512
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last')
    tmp35 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last')
    tmp47 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 4, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = tl.load(in_ptr2 + (tmp8 + (4*tmp4) + (16*x6)), None, eviction_policy='evict_last').to(tl.int1)
    tmp10 = tl.load(in_ptr3 + (tmp8 + (4*tmp4) + (16*x6)), None, eviction_policy='evict_last')
    tmp12 = tmp10 + tmp11
    tmp13 = 0.1
    tmp14 = tmp12 * tmp13
    tmp15 = tl.where(tmp9, tmp12, tmp14)
    tmp17 = tmp16 + tmp1
    tmp18 = tmp16 < 0
    tmp19 = tl.where(tmp18, tmp17, tmp16)
    tmp20 = tl.load(in_ptr2 + (tmp8 + (4*tmp19) + (16*x6)), None, eviction_policy='evict_last').to(tl.int1)
    tmp21 = tl.load(in_ptr3 + (tmp8 + (4*tmp19) + (16*x6)), None, eviction_policy='evict_last')
    tmp22 = tmp21 + tmp11
    tmp23 = tmp22 * tmp13
    tmp24 = tl.where(tmp20, tmp22, tmp23)
    tmp26 = tmp25 + tmp1
    tmp27 = tmp25 < 0
    tmp28 = tl.where(tmp27, tmp26, tmp25)
    tmp29 = tl.load(in_ptr2 + (tmp28 + (4*tmp19) + (16*x6)), None, eviction_policy='evict_last').to(tl.int1)
    tmp30 = tl.load(in_ptr3 + (tmp28 + (4*tmp19) + (16*x6)), None, eviction_policy='evict_last')
    tmp31 = tmp30 + tmp11
    tmp32 = tmp31 * tmp13
    tmp33 = tl.where(tmp29, tmp31, tmp32)
    tmp34 = tmp33 - tmp24
    tmp36 = tmp34 * tmp35
    tmp37 = tmp24 + tmp36
    tmp38 = tl.load(in_ptr2 + (tmp28 + (4*tmp4) + (16*x6)), None, eviction_policy='evict_last').to(tl.int1)
    tmp39 = tl.load(in_ptr3 + (tmp28 + (4*tmp4) + (16*x6)), None, eviction_policy='evict_last')
    tmp40 = tmp39 + tmp11
    tmp41 = tmp40 * tmp13
    tmp42 = tl.where(tmp38, tmp40, tmp41)
    tmp43 = tmp42 - tmp15
    tmp44 = tmp43 * tmp35
    tmp45 = tmp15 + tmp44
    tmp46 = tmp45 - tmp37
    tmp48 = tmp46 * tmp47
    tmp49 = tmp37 + tmp48
    tl.store(in_out_ptr1 + (x4), tmp49, None)
''', device_str='cuda')
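
# NOTE: the fused kernel above folds the whole
#     leaky_relu(conv2d_13 + bias) -> 2x bilinear upsample (4x4 -> 8x8)
# step into one pass. For each output pixel it gathers the four neighbouring
# input taps, re-applies the 0.1-slope leaky ReLU from the precomputed sign
# mask (in_ptr2), and blends with two horizontal lerps followed by one
# vertical lerp, matching the graph fragment:
#     top = v00 + (v01 - v00) * wx
#     bot = v10 + (v11 - v10) * wx
#     out = top + (bot - top) * wy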
# kernel path: runs/run_shard_0/inductor_cache/vn/cvntemv5weoouv65lvun2muyb6apyj7dkrnebouaithxvdyd4hl4.py
# Topologically Sorted Source Nodes: [conv2d_14, x_20], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
#   conv2d_14 => convolution_14
#   x_20 => gt_14
# Graph fragment:
#   %convolution_14 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_13, %primals_30, %primals_31, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt_14 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_14, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_22 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_22(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 65536
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 64) % 256
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
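
# NOTE: this kernel does not materialise leaky_relu(x) itself; it only stores
# the boolean mask (conv + bias) > 0 as int1. Downstream kernels rebuild the
# activation branchlessly from that mask,
#     y = tl.where(mask, x, 0.1 * x)
# so only a one-byte mask per element is kept instead of a second fp32 copy
# of the activation.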
# kernel path: runs/run_shard_0/inductor_cache/3p/c3pazcbmhoubkrcj7s65glics5kpj5vv7x2zlnvymydp46fxyf2m.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   cat_1 => cat_1
# Graph fragment:
#   %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_14, %where_7], 1), kwargs = {})
triton_poi_fused_cat_23 = async_compile.triton('triton_poi_fused_cat_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072],
    filename=__file__,
    triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 131072
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x1 = (xindex // 64) % 512
    x0 = xindex % 64
    x2 = (xindex // 32768)
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 256, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + (64*x1) + (16384*x2)), tmp4, other=0.0).to(tl.int1)
    tmp6 = tl.load(in_ptr1 + (x0 + (64*x1) + (16384*x2)), tmp4, other=0.0)
    tmp7 = tl.load(in_ptr2 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
    tmp8 = tmp6 + tmp7
    tmp9 = 0.1
    tmp10 = tmp8 * tmp9
    tmp11 = tl.where(tmp5, tmp8, tmp10)
    tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
    tmp13 = tl.where(tmp4, tmp11, tmp12)
    tmp14 = tmp0 >= tmp3
    tmp15 = tl.full([1], 512, tl.int64)
    tmp16 = tmp0 < tmp15
    tmp17 = tl.load(in_ptr3 + (x0 + (64*((-256) + x1)) + (16384*x2)), tmp14, other=0.0)
    tmp18 = tl.where(tmp4, tmp13, tmp17)
    tl.store(out_ptr0 + (x3), tmp18, None)
''', device_str='cuda')
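
# NOTE: torch.cat along dim=1 is lowered to a single branchless gather: each
# output element compares its channel index against the split point (256 of
# 512 here) and selects either the leaky-relu'd decoder features
# (in_ptr0..in_ptr2: mask, conv output, bias) or the stored skip connection
# (in_ptr3), so no per-input intermediate buffer is written.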
# kernel path: runs/run_shard_0/inductor_cache/mi/cmiwjqieuspwn256jnrugfvht2dt7ofln2psibayqc3twrtpkngi.py
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
#   x_22 => convert_element_type_9
# Graph fragment:
#   %convert_element_type_9 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_4, torch.int64), kwargs = {})
triton_poi_fused__to_copy_24 = async_compile.triton('triton_poi_fused__to_copy_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_24', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_24(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/ki/ckith5u474vepvwaijseaqbn665u5jpg5stc4cj42bnzsgj6uexm.py
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
#   x_22 => add_15, clamp_max_8
# Graph fragment:
#   %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_9, 1), kwargs = {})
#   %clamp_max_8 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_15, 7), kwargs = {})
triton_poi_fused_add_clamp_25 = async_compile.triton('triton_poi_fused_add_clamp_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_25', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_25(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tl.full([1], 1, tl.int64)
    tmp10 = tmp8 + tmp9
    tmp11 = tl.full([1], 7, tl.int64)
    tmp12 = triton_helpers.minimum(tmp10, tmp11)
    tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lu/cluyprd6omil4csahwtbdldnx2kt7j7znt35dzjdzj4xcxjsppaa.py
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
#   x_22 => add_14, clamp_max_10, clamp_min_10, clamp_min_8, convert_element_type_8, iota_4, mul_26, sub_14, sub_16
# Graph fragment:
#   %iota_4 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_4, torch.float32), kwargs = {})
#   %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_8, 0.5), kwargs = {})
#   %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_14, 0.5), kwargs = {})
#   %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_26, 0.5), kwargs = {})
#   %clamp_min_8 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_14, 0.0), kwargs = {})
#   %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_8, %convert_element_type_11), kwargs = {})
#   %clamp_min_10 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_16, 0.0), kwargs = {})
#   %clamp_max_10 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_10, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 - tmp9
    tmp11 = triton_helpers.maximum(tmp10, tmp6)
    tmp12 = 1.0
    tmp13 = triton_helpers.minimum(tmp11, tmp12)
    tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qc/cqcxmomsd2pfozktj753ije5uupmwdotvhhglolxtdikeyegh5yz.py
# Topologically Sorted Source Nodes: [conv2d_15, x_21, x_22], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
#   conv2d_15 => convolution_15
#   x_21 => mul_25, where_15
#   x_22 => _unsafe_index_10, _unsafe_index_11, _unsafe_index_8, _unsafe_index_9, add_18, add_19, add_20, mul_28, mul_29, mul_30, sub_17, sub_18, sub_20
# Graph fragment:
#   %convolution_15 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_32, %primals_33, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_15, 0.1), kwargs = {})
#   %where_15 : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt_15, %convolution_15, %mul_25), kwargs = {})
#   %_unsafe_index_8 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_15, [None, None, %convert_element_type_9, %convert_element_type_11]), kwargs = {})
#   %_unsafe_index_9 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_15, [None, None, %convert_element_type_9, %clamp_max_9]), kwargs = {})
#   %_unsafe_index_10 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_15, [None, None, %clamp_max_8, %convert_element_type_11]), kwargs = {})
#   %_unsafe_index_11 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_15, [None, None, %clamp_max_8, %clamp_max_9]), kwargs = {})
#   %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_9, %_unsafe_index_8), kwargs = {})
#   %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, %clamp_max_10), kwargs = {})
#   %add_18 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_8, %mul_28), kwargs = {})
#   %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_11, %_unsafe_index_10), kwargs = {})
#   %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_18, %clamp_max_10), kwargs = {})
#   %add_19 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_10, %mul_29), kwargs = {})
#   %sub_20 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_19, %add_18), kwargs = {})
#   %mul_30 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_20, %clamp_max_11), kwargs = {})
#   %add_20 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_18, %mul_30), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[262144],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i1', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
    xnumel = 262144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x1 = (xindex // 16) % 16
    x0 = xindex % 16
    x6 = (xindex // 256)
    x2 = (xindex // 256) % 256
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last')
    tmp35 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last')
    tmp47 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 8, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = tl.load(in_ptr2 + (tmp8 + (8*tmp4) + (64*x6)), None, eviction_policy='evict_last').to(tl.int1)
    tmp10 = tl.load(in_ptr3 + (tmp8 + (8*tmp4) + (64*x6)), None, eviction_policy='evict_last')
    tmp12 = tmp10 + tmp11
    tmp13 = 0.1
    tmp14 = tmp12 * tmp13
    tmp15 = tl.where(tmp9, tmp12, tmp14)
    tmp17 = tmp16 + tmp1
    tmp18 = tmp16 < 0
    tmp19 = tl.where(tmp18, tmp17, tmp16)
    tmp20 = tl.load(in_ptr2 + (tmp8 + (8*tmp19) + (64*x6)), None, eviction_policy='evict_last').to(tl.int1)
    tmp21 = tl.load(in_ptr3 + (tmp8 + (8*tmp19) + (64*x6)), None, eviction_policy='evict_last')
    tmp22 = tmp21 + tmp11
    tmp23 = tmp22 * tmp13
    tmp24 = tl.where(tmp20, tmp22, tmp23)
    tmp26 = tmp25 + tmp1
    tmp27 = tmp25 < 0
    tmp28 = tl.where(tmp27, tmp26, tmp25)
    tmp29 = tl.load(in_ptr2 + (tmp28 + (8*tmp19) + (64*x6)), None, eviction_policy='evict_last').to(tl.int1)
    tmp30 = tl.load(in_ptr3 + (tmp28 + (8*tmp19) + (64*x6)), None, eviction_policy='evict_last')
    tmp31 = tmp30 + tmp11
    tmp32 = tmp31 * tmp13
    tmp33 = tl.where(tmp29, tmp31, tmp32)
    tmp34 = tmp33 - tmp24
    tmp36 = tmp34 * tmp35
    tmp37 = tmp24 + tmp36
    tmp38 = tl.load(in_ptr2 + (tmp28 + (8*tmp4) + (64*x6)), None, eviction_policy='evict_last').to(tl.int1)
    tmp39 = tl.load(in_ptr3 + (tmp28 + (8*tmp4) + (64*x6)), None, eviction_policy='evict_last')
    tmp40 = tmp39 + tmp11
    tmp41 = tmp40 * tmp13
    tmp42 = tl.where(tmp38, tmp40, tmp41)
    tmp43 = tmp42 - tmp15
    tmp44 = tmp43 * tmp35
    tmp45 = tmp15 + tmp44
    tmp46 = tmp45 - tmp37
    tmp48 = tmp46 * tmp47
    tmp49 = tmp37 + tmp48
    tl.store(in_out_ptr1 + (x4), tmp49, None)
''', device_str='cuda')
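
# NOTE: triton_poi_fused__to_copy_24 through triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27
# above repeat the index-table / weight-table / fused-upsample pattern for the
# next decoder stage (8x8 -> 16x16 at 256 channels). The remaining stages
# below (16x16 -> 32x32 at 128 channels, then 32x32 -> 64x64) are generated
# the same way, with only the sizes and clamp bounds changing.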
# kernel path: runs/run_shard_0/inductor_cache/2n/c2ncutq26bahxlgkdh4rlnvyr47bwimc4zzutvjxr5n6y6efndwb.py
# Topologically Sorted Source Nodes: [conv2d_16, x_23], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
#   conv2d_16 => convolution_16
#   x_23 => gt_16
# Graph fragment:
#   %convolution_16 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_20, %primals_34, %primals_35, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt_16 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_16, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_28 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_28', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_28', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_28(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 131072
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 256) % 128
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/oe/coeffqdqijre65ihx7pvd5fl3wkvhaztzhmg43n5sxftyfhfnesu.py
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   cat_2 => cat_2
# Graph fragment:
#   %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_16, %where_5], 1), kwargs = {})
triton_poi_fused_cat_29 = async_compile.triton('triton_poi_fused_cat_29', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[262144],
    filename=__file__,
    triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_29', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_29(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 262144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x1 = (xindex // 256) % 256
    x0 = xindex % 256
    x2 = (xindex // 65536)
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 128, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + (256*x1) + (32768*x2)), tmp4, other=0.0).to(tl.int1)
    tmp6 = tl.load(in_ptr1 + (x0 + (256*x1) + (32768*x2)), tmp4, other=0.0)
    tmp7 = tl.load(in_ptr2 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
    tmp8 = tmp6 + tmp7
    tmp9 = 0.1
    tmp10 = tmp8 * tmp9
    tmp11 = tl.where(tmp5, tmp8, tmp10)
    tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
    tmp13 = tl.where(tmp4, tmp11, tmp12)
    tmp14 = tmp0 >= tmp3
    tmp15 = tl.full([1], 256, tl.int64)
    tmp16 = tmp0 < tmp15
    tmp17 = tl.load(in_ptr3 + (x0 + (256*((-128) + x1)) + (32768*x2)), tmp14, other=0.0)
    tmp18 = tl.where(tmp4, tmp13, tmp17)
    tl.store(out_ptr0 + (x3), tmp18, None)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/kk/ckknliedqrn55tdjnurtw2wmbhy4m7nftlest3rkcxytrug6sjjb.py
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
#   x_25 => convert_element_type_13
# Graph fragment:
#   %convert_element_type_13 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_6, torch.int64), kwargs = {})
triton_poi_fused__to_copy_30 = async_compile.triton('triton_poi_fused__to_copy_30', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[32],
    filename=__file__,
    triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_30', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_30(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/yi/cyid3ait3xdgmowp4yeresvz4pwdwiylxhglyhgg7hauo5drkgwr.py
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
#   x_25 => add_22, clamp_max_12
# Graph fragment:
#   %add_22 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_13, 1), kwargs = {})
#   %clamp_max_12 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_22, 15), kwargs = {})
triton_poi_fused_add_clamp_31 = async_compile.triton('triton_poi_fused_add_clamp_31', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[32],
    filename=__file__,
    triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_31', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_31(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tl.full([1], 1, tl.int64)
    tmp10 = tmp8 + tmp9
    tmp11 = tl.full([1], 15, tl.int64)
    tmp12 = triton_helpers.minimum(tmp10, tmp11)
    tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/2b/c2bhdwgkdtsx66umnkggdw7khh2c5wl4ogab34mcyesmsvh7u75z.py
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
#   x_25 => add_21, clamp_max_14, clamp_min_12, clamp_min_14, convert_element_type_12, iota_6, mul_33, sub_21, sub_23
# Graph fragment:
#   %iota_6 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (32,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
#   %convert_element_type_12 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_6, torch.float32), kwargs = {})
#   %add_21 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_12, 0.5), kwargs = {})
#   %mul_33 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_21, 0.5), kwargs = {})
#   %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_33, 0.5), kwargs = {})
#   %clamp_min_12 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_21, 0.0), kwargs = {})
#   %sub_23 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_12, %convert_element_type_15), kwargs = {})
#   %clamp_min_14 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_23, 0.0), kwargs = {})
#   %clamp_max_14 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_14, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32 =
async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[32],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tmp8.to(tl.float32)
    tmp10 = tmp7 - tmp9
    tmp11 = triton_helpers.maximum(tmp10, tmp6)
    tmp12 = 1.0
    tmp13 = triton_helpers.minimum(tmp11, tmp12)
    tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/74/c74xhgb6wezra6e6cyzbt6yotgcl7i7n4p3es6nogdaczlcdlrl4.py
# Topologically Sorted Source Nodes: [conv2d_17, x_24, x_25], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
#   conv2d_17 => convolution_17
#   x_24 => mul_32, where_17
#   x_25 => _unsafe_index_12, _unsafe_index_13, _unsafe_index_14, _unsafe_index_15, add_25, add_26, add_27, mul_35, mul_36, mul_37, sub_24, sub_25, sub_27
# Graph fragment:
#   %convolution_17 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_36, %primals_37, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %mul_32 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_17, 0.1), kwargs = {})
#   %where_17 : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt_17, %convolution_17, %mul_32), kwargs = {})
#   %_unsafe_index_12 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_17, [None, None, %convert_element_type_13, %convert_element_type_15]), kwargs = {})
#   %_unsafe_index_13 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_17, [None, None, %convert_element_type_13, %clamp_max_13]), kwargs = {})
#   %_unsafe_index_14 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_17, [None, None, %clamp_max_12, %convert_element_type_15]), kwargs = {})
#   %_unsafe_index_15 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_17, [None, None, %clamp_max_12, %clamp_max_13]), kwargs = {})
#   %sub_24 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_13, %_unsafe_index_12), kwargs = {})
#   %mul_35 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_24, %clamp_max_14), kwargs = {})
#   %add_25 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_12, %mul_35), kwargs = {})
#   %sub_25 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_15, %_unsafe_index_14), kwargs = {})
#   %mul_36 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_25, %clamp_max_14), kwargs = {})
#   %add_26 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_14, %mul_36), kwargs = {})
#   %sub_27 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_26, %add_25), kwargs = {})
#   %mul_37 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_27, %clamp_max_15), kwargs = {})
#   %add_27 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_25, %mul_37), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[524288],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i1', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
    xnumel = 524288
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x1 = (xindex // 32) % 32
    x0 = xindex % 32
    x6 = (xindex // 1024)
    x2 = (xindex // 1024) % 128
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last')
    tmp35 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last')
    tmp47 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 16, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = tl.load(in_ptr2 + (tmp8 + (16*tmp4) + (256*x6)), None, eviction_policy='evict_last').to(tl.int1)
    tmp10 = tl.load(in_ptr3 + (tmp8 + (16*tmp4) + (256*x6)), None, eviction_policy='evict_last')
    tmp12 = tmp10 + tmp11
    tmp13 = 0.1
    tmp14 = tmp12 * tmp13
    tmp15 = tl.where(tmp9, tmp12, tmp14)
    tmp17 = tmp16 + tmp1
    tmp18 = tmp16 < 0
    tmp19 = tl.where(tmp18, tmp17, tmp16)
    tmp20 = tl.load(in_ptr2 + (tmp8 + (16*tmp19) + (256*x6)), None, eviction_policy='evict_last').to(tl.int1)
    tmp21 = tl.load(in_ptr3 + (tmp8 + (16*tmp19) + (256*x6)), None, eviction_policy='evict_last')
    tmp22 = tmp21 + tmp11
    tmp23 = tmp22 * tmp13
    tmp24 = tl.where(tmp20, tmp22, tmp23)
    tmp26 = tmp25 + tmp1
    tmp27 = tmp25 < 0
    tmp28 = tl.where(tmp27, tmp26, tmp25)
    tmp29 = tl.load(in_ptr2 + (tmp28 + (16*tmp19) + (256*x6)), None, eviction_policy='evict_last').to(tl.int1)
    tmp30 = tl.load(in_ptr3 + (tmp28 + (16*tmp19) + (256*x6)), None, eviction_policy='evict_last')
    tmp31 = tmp30 + tmp11
    tmp32 = tmp31 * tmp13
    tmp33 = tl.where(tmp29, tmp31, tmp32)
    tmp34 = tmp33 - tmp24
    tmp36 = tmp34 * tmp35
    tmp37 = tmp24 + tmp36
    tmp38 = tl.load(in_ptr2 + (tmp28 + (16*tmp4) + (256*x6)), None, eviction_policy='evict_last').to(tl.int1)
    tmp39 = tl.load(in_ptr3 + (tmp28 + (16*tmp4) + (256*x6)), None, eviction_policy='evict_last')
    tmp40 = tmp39 + tmp11
    tmp41 = tmp40 * tmp13
    tmp42 = tl.where(tmp38, tmp40, tmp41)
    tmp43 = tmp42 - tmp15
    tmp44 = tmp43 * tmp35
    tmp45 = tmp15 + tmp44
    tmp46 = tmp45 - tmp37
    tmp48 = tmp46 * tmp47
    tmp49 = tmp37 + tmp48
    tl.store(in_out_ptr1 + (x4), tmp49, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/hu/chuh444ehiqujq3pobg3s6kf4jk3jfs66ff4yxzuqyv7z7gvdw4l.py
# Topologically Sorted Source Nodes: [conv2d_18, x_26], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
#   conv2d_18 => convolution_18
#   x_26 => gt_18
# Graph fragment:
#   %convolution_18 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_27, %primals_38, %primals_39, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt_18 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_18, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_34 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_34', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[262144],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_34', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_34(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 262144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 1024) % 64
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wo/cwov5xjz2rgypru6odo5shttzkvjzbv2j5h765xadmngxefsg27w.py
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   cat_3 => cat_3
# Graph fragment:
#   %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_18, %where_3], 1), kwargs = {})
triton_poi_fused_cat_35 = async_compile.triton('triton_poi_fused_cat_35', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[524288],
    filename=__file__,
    triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_35', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_35(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 524288
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x1 = (xindex // 1024) % 128
    x0 = xindex % 1024
    x2 = (xindex // 131072)
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 64, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + (1024*x1) + (65536*x2)), tmp4, other=0.0).to(tl.int1)
    tmp6 = tl.load(in_ptr1 + (x0 + (1024*x1) + (65536*x2)), tmp4, other=0.0)
    tmp7 = tl.load(in_ptr2 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
    tmp8 = tmp6 + tmp7
    tmp9 = 0.1
    tmp10 = tmp8 * tmp9
    tmp11 = tl.where(tmp5, tmp8, tmp10)
    tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
    tmp13 = tl.where(tmp4, tmp11, tmp12)
    tmp14 = tmp0 >= tmp3
    tmp15 = tl.full([1], 128, tl.int64)
    tmp16 = tmp0 < tmp15
    tmp17 = tl.load(in_ptr3 + (x0 + (1024*((-64) + x1)) + (65536*x2)), tmp14, other=0.0)
    tmp18 = tl.where(tmp4, tmp13, tmp17)
    tl.store(out_ptr0 + (x3), tmp18, None)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/dy/cdyi77rlf5lxwibfyd5p2m432vdqftcfdpn6tuqyv7hyajbxkkvo.py
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
#   x_28 => convert_element_type_17
# Graph fragment:
#   %convert_element_type_17 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_8, torch.int64), kwargs = {})
triton_poi_fused__to_copy_36 = async_compile.triton('triton_poi_fused__to_copy_36', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_36', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_36(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
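
# NOTE: triton_poi_fused__to_copy_36 above and the two small kernels below
# build the index/weight tables for the final 2x upsample in this section
# (32 -> 64 per side); the hi index is clamped to 31 = in_size - 1.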
# kernel path: runs/run_shard_0/inductor_cache/l5/cl5xfazaijdiktz5n5gqb2xvmg6f5wpggcjdlnx676ib5wqnt2bo.py
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
#   x_28 => add_29, clamp_max_16
# Graph fragment:
#   %add_29 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_17, 1), kwargs = {})
#   %clamp_max_16 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_29, 31), kwargs = {})
triton_poi_fused_add_clamp_37 = async_compile.triton('triton_poi_fused_add_clamp_37', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_37', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_37(out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = tmp3 * tmp2
    tmp5 = tmp4 - tmp2
    tmp6 = 0.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tl.full([1], 1, tl.int64)
    tmp10 = tmp8 + tmp9
    tmp11 = tl.full([1], 31, tl.int64)
    tmp12 = triton_helpers.minimum(tmp10, tmp11)
    tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
[num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_30, 0.0), kwargs = {}) # %clamp_max_18 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_18, 1.0), kwargs = {}) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + (x0), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/cj/ccjteqzvlszshm7xogphjn6lezrnhtguz6t2ft3y2qrajrcko2wk.py # Topologically Sorted Source Nodes: [conv2d_19, x_27, x_28], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add] # Source node to ATen node mapping: # conv2d_19 => convolution_19 # x_27 => mul_39, where_19 # x_28 => _unsafe_index_16, _unsafe_index_17, _unsafe_index_18, _unsafe_index_19, add_32, add_33, add_34, mul_42, mul_43, mul_44, sub_31, sub_32, sub_34 # Graph fragment: # %convolution_19 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_3, %primals_40, %primals_41, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %mul_39 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_19, 0.1), kwargs = {}) # %where_19 : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt_19, %convolution_19, %mul_39), kwargs = {}) # %_unsafe_index_16 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_19, [None, None, %convert_element_type_17, 
%convert_element_type_19]), kwargs = {}) # %_unsafe_index_17 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_19, [None, None, %convert_element_type_17, %clamp_max_17]), kwargs = {}) # %_unsafe_index_18 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_19, [None, None, %clamp_max_16, %convert_element_type_19]), kwargs = {}) # %_unsafe_index_19 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_19, [None, None, %clamp_max_16, %clamp_max_17]), kwargs = {}) # %sub_31 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_17, %_unsafe_index_16), kwargs = {}) # %mul_42 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_31, %clamp_max_18), kwargs = {}) # %add_32 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_16, %mul_42), kwargs = {}) # %sub_32 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_19, %_unsafe_index_18), kwargs = {}) # %mul_43 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_32, %clamp_max_18), kwargs = {}) # %add_33 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_18, %mul_43), kwargs = {}) # %sub_34 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_33, %add_32), kwargs = {}) # %mul_44 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_34, %clamp_max_19), kwargs = {}) # %add_34 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_32, %mul_44), kwargs = {}) triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i1', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, 
in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 64) % 64 x0 = xindex % 64 x6 = (xindex // 4096) x2 = (xindex // 4096) % 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last') tmp47 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 32, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + (32*tmp4) + (1024*x6)), None, eviction_policy='evict_last').to(tl.int1) tmp10 = tl.load(in_ptr3 + (tmp8 + (32*tmp4) + (1024*x6)), None, eviction_policy='evict_last') tmp12 = tmp10 + tmp11 tmp13 = 0.1 tmp14 = tmp12 * tmp13 tmp15 = tl.where(tmp9, tmp12, tmp14) tmp17 = tmp16 + tmp1 tmp18 = tmp16 < 0 tmp19 = tl.where(tmp18, tmp17, tmp16) tmp20 = tl.load(in_ptr2 + (tmp8 + (32*tmp19) + (1024*x6)), None, eviction_policy='evict_last').to(tl.int1) tmp21 = tl.load(in_ptr3 + (tmp8 + (32*tmp19) + (1024*x6)), None, eviction_policy='evict_last') tmp22 = tmp21 + tmp11 tmp23 = tmp22 * tmp13 tmp24 = tl.where(tmp20, tmp22, tmp23) tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp28 + (32*tmp19) + (1024*x6)), None, eviction_policy='evict_last').to(tl.int1) tmp30 = tl.load(in_ptr3 + (tmp28 + (32*tmp19) + (1024*x6)), None, eviction_policy='evict_last') tmp31 = tmp30 + tmp11 tmp32 = tmp31 * tmp13 tmp33 = tl.where(tmp29, tmp31, tmp32) tmp34 = tmp33 - tmp24 tmp36 = tmp34 * tmp35 tmp37 = tmp24 + tmp36 tmp38 = tl.load(in_ptr2 + (tmp28 + (32*tmp4) + (1024*x6)), None, eviction_policy='evict_last').to(tl.int1) tmp39 = tl.load(in_ptr3 + (tmp28 + (32*tmp4) + (1024*x6)), None, eviction_policy='evict_last') tmp40 = tmp39 + tmp11 tmp41 = tmp40 * tmp13 tmp42 = tl.where(tmp38, tmp40, tmp41) tmp43 = tmp42 - tmp15 tmp44 = tmp43 * tmp35 tmp45 = tmp15 + tmp44 tmp46 = tmp45 - tmp37 tmp48 = tmp46 * tmp47 tmp49 = tmp37 + tmp48 tl.store(in_out_ptr1 + (x4), tmp49, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/vz/cvzt4swwgye3zpxep4ligqcdlu5xxf7fecsvlec4qsz3qtn6tkxy.py # Topologically Sorted Source Nodes: [conv2d_20, x_29], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # conv2d_20 => convolution_20 # x_29 => gt_20 # Graph fragment: # %convolution_20 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_34, %primals_42, %primals_43, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_20 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_20, 0), kwargs = {}) triton_poi_fused_convolution_leaky_relu_40 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_40', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_40', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_40(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 32 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/rm/crmdmqyxav4fb4725fm2hhwf6m4yrxuhqmo2dbcjkiu6gomd2akp.py # Topologically Sorted Source Nodes: [cat_4], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat_4 => cat_4 # Graph fragment: # %cat_4 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_20, %where_1], 1), kwargs = {}) triton_poi_fused_cat_41 = async_compile.triton('triton_poi_fused_cat_41', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_41', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
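# --- Explanatory note (not emitted by Inductor) ----------------------------
# The kernel below fuses cat_4 = torch.cat([where_20, where_1], dim=1) with a
# recomputation of the leaky_relu feeding its first operand: for output
# channels 0-31 it re-applies bias-add + leaky_relu (slope 0.1) to the conv
# output, using the boolean mask precomputed by
# triton_poi_fused_convolution_leaky_relu_40; for channels 32-63 it copies the
# skip tensor through unchanged. A rough eager-mode sketch (names illustrative):
#     pre = conv_out + bias.view(1, -1, 1, 1)
#     a = torch.where(mask, pre, pre * 0.1)   # mask = pre > 0, precomputed
#     out = torch.cat([a, skip], dim=1)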
@triton.jit def triton_poi_fused_cat_41(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 4096) % 64 x0 = xindex % 4096 x2 = (xindex // 262144) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (131072*x2)), tmp4, other=0.0).to(tl.int1) tmp6 = tl.load(in_ptr1 + (x0 + (4096*x1) + (131072*x2)), tmp4, other=0.0) tmp7 = tl.load(in_ptr2 + (x1), tmp4, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = 0.1 tmp10 = tmp8 * tmp9 tmp11 = tl.where(tmp5, tmp8, tmp10) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tmp15 = tl.full([1], 64, tl.int64) tmp16 = tmp0 < tmp15 tmp17 = tl.load(in_ptr3 + (x0 + (4096*((-32) + x1)) + (131072*x2)), tmp14, other=0.0) tmp18 = tl.where(tmp4, tmp13, tmp17) tl.store(out_ptr0 + (x3), tmp18, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/p7/cp74qdkmdadhqce4dzechhvpihfuica6bbejv65ptme5otg3jhj3.py # Topologically Sorted Source Nodes: [conv2d_22, x_31], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # conv2d_22 => convolution_22 # x_31 => gt_22, mul_47, where_22 # Graph fragment: # %convolution_22 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_21, %primals_46, %primals_47, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_22 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_22, 0), kwargs = {}) # %mul_47 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_22, 0.1), kwargs = {}) # %where_22 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_22, %convolution_22, %mul_47), kwargs = {}) triton_poi_fused_convolution_leaky_relu_42 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_42', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_42', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_42(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 4 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x3), tmp4, None) tl.store(out_ptr1 + (x3), tmp7, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47 = args args.clear() assert_size_stride(primals_1, (32, 4, 9, 9), (324, 81, 9, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (32, 32, 7, 7), (1568, 49, 7, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (64, 32, 5, 5), (800, 25, 5, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (64, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_9, (64, ), (1, )) assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (128, ), (1, )) assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (128, ), (1, )) assert_size_stride(primals_14, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (256, ), (1, )) assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (256, ), (1, )) assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_19, (512, ), (1, )) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512, ), (1, )) assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (512, ), (1, )) assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512, ), (1, )) assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (512, ), (1, )) assert_size_stride(primals_28, (512, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_29, (512, ), (1, )) assert_size_stride(primals_30, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_31, (256, ), (1, )) assert_size_stride(primals_32, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_33, (256, ), (1, )) assert_size_stride(primals_34, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_35, (128, ), (1, )) assert_size_stride(primals_36, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_37, (128, ), (1, )) assert_size_stride(primals_38, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_39, (64, ), (1, )) assert_size_stride(primals_40, (64, 128, 3, 3), (1152, 9, 3, 1)) 
assert_size_stride(primals_41, (64, ), (1, )) assert_size_stride(primals_42, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_43, (32, ), (1, )) assert_size_stride(primals_44, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_45, (32, ), (1, )) assert_size_stride(primals_46, (4, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_47, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 524288, grid=grid(524288), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf4 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) buf5 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d_1, s1], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_0.run(buf3, primals_5, buf4, buf5, 524288, grid=grid(524288), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.avg_pool2d] triton_poi_fused_avg_pool2d_1.run(buf5, buf6, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf8 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_2.run(buf7, primals_7, buf8, buf9, 262144, grid=grid(262144), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf11 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) buf12 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_2.run(buf10, primals_9, buf11, buf12, 262144, grid=grid(262144), stream=stream0) del primals_9 buf13 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original 
ATen: [aten.avg_pool2d] triton_poi_fused_avg_pool2d_3.run(buf12, buf13, 65536, grid=grid(65536), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 128, 16, 16), (32768, 256, 16, 1)) buf15 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) buf16 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_4, x_5], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_4.run(buf14, primals_11, buf15, buf16, 131072, grid=grid(131072), stream=stream0) del primals_11 # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf17 = extern_kernels.convolution(buf16, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 128, 16, 16), (32768, 256, 16, 1)) buf18 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) buf19 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [conv2d_5, x_6], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_4.run(buf17, primals_13, buf18, buf19, 131072, grid=grid(131072), stream=stream0) del primals_13 buf20 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.avg_pool2d] triton_poi_fused_avg_pool2d_5.run(buf19, buf20, 32768, grid=grid(32768), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf21 = extern_kernels.convolution(buf20, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 256, 8, 8), (16384, 64, 8, 1)) buf22 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool) buf23 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_6, x_8], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_6.run(buf21, primals_15, buf22, buf23, 65536, grid=grid(65536), stream=stream0) del primals_15 # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf23, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 256, 8, 8), (16384, 64, 8, 1)) buf25 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool) buf26 = buf21; del buf21 # reuse # Topologically Sorted Source Nodes: [conv2d_7, x_9], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_6.run(buf24, primals_17, buf25, buf26, 65536, grid=grid(65536), stream=stream0) del primals_17 buf27 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.avg_pool2d] triton_poi_fused_avg_pool2d_7.run(buf26, buf27, 16384, grid=grid(16384), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution] buf28 = extern_kernels.convolution(buf27, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 
0), groups=1, bias=None) assert_size_stride(buf28, (4, 512, 4, 4), (8192, 16, 4, 1)) buf29 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool) buf30 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_8, x_11], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_8.run(buf28, primals_19, buf29, buf30, 32768, grid=grid(32768), stream=stream0) del primals_19 # Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution] buf31 = extern_kernels.convolution(buf30, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf31, (4, 512, 4, 4), (8192, 16, 4, 1)) buf32 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool) buf33 = buf28; del buf28 # reuse # Topologically Sorted Source Nodes: [conv2d_9, x_12], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_8.run(buf31, primals_21, buf32, buf33, 32768, grid=grid(32768), stream=stream0) del primals_21 buf34 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.avg_pool2d] triton_poi_fused_avg_pool2d_9.run(buf33, buf34, 8192, grid=grid(8192), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution] buf35 = extern_kernels.convolution(buf34, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 512, 2, 2), (2048, 4, 2, 1)) buf36 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.bool) buf37 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_10, x_14], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_10.run(buf35, primals_23, buf36, buf37, 8192, grid=grid(8192), stream=stream0) del buf35 del primals_23 # Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution] buf38 = extern_kernels.convolution(buf37, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 512, 2, 2), (2048, 4, 2, 1)) buf39 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_11, x_15], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_11.run(buf38, primals_25, buf39, 8192, grid=grid(8192), stream=stream0) buf40 = empty_strided_cuda((4, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [x_16], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_12.run(buf40, 4, grid=grid(4), stream=stream0) buf41 = empty_strided_cuda((4, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_13.run(buf41, 4, grid=grid(4), stream=stream0) buf42 = empty_strided_cuda((4, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp] triton_poi_fused__to_copy_12.run(buf42, 4, grid=grid(4), stream=stream0) buf43 = empty_strided_cuda((4, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_13.run(buf43, 4, grid=grid(4), 
stream=stream0) buf46 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp] triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14.run(buf46, 4, grid=grid(4), stream=stream0) buf48 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.sub, aten.clamp] triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14.run(buf48, 4, grid=grid(4), stream=stream0) buf45 = buf31; del buf31 # reuse buf49 = buf45; del buf45 # reuse buf50 = buf49; del buf49 # reuse # Topologically Sorted Source Nodes: [conv2d_11, x_15, x_16], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add] triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15.run(buf50, buf41, buf42, buf39, buf38, primals_25, buf40, buf43, buf46, buf48, 32768, grid=grid(32768), stream=stream0) del buf38 del primals_25 # Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution] buf51 = extern_kernels.convolution(buf50, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf51, (4, 512, 4, 4), (8192, 16, 4, 1)) buf52 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_12, x_17], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_16.run(buf51, primals_27, buf52, 32768, grid=grid(32768), stream=stream0) buf53 = reinterpret_tensor(buf24, (4, 1024, 4, 4), (16384, 16, 4, 1), 0); del buf24 # reuse # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_17.run(buf52, buf51, primals_27, buf33, buf53, 65536, grid=grid(65536), stream=stream0) del buf51 del primals_27 # Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution] buf54 = extern_kernels.convolution(buf53, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf54, (4, 512, 4, 4), (8192, 16, 4, 1)) buf55 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_13, x_18], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_16.run(buf54, primals_29, buf55, 32768, grid=grid(32768), stream=stream0) buf56 = empty_strided_cuda((8, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [x_19], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_18.run(buf56, 8, grid=grid(8), stream=stream0) buf57 = empty_strided_cuda((8, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_19.run(buf57, 8, grid=grid(8), stream=stream0) buf58 = empty_strided_cuda((8, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp] triton_poi_fused__to_copy_18.run(buf58, 8, grid=grid(8), stream=stream0) buf59 = empty_strided_cuda((8, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_19.run(buf59, 8, grid=grid(8), stream=stream0) buf62 = empty_strided_cuda((8, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.arange, aten._to_copy, 
aten.add, aten.mul, aten.sub, aten.clamp] triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20.run(buf62, 8, grid=grid(8), stream=stream0) buf64 = empty_strided_cuda((8, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.sub, aten.clamp] triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20.run(buf64, 8, grid=grid(8), stream=stream0) buf61 = reinterpret_tensor(buf17, (4, 512, 8, 8), (32768, 64, 8, 1), 0); del buf17 # reuse buf65 = buf61; del buf61 # reuse buf66 = buf65; del buf65 # reuse # Topologically Sorted Source Nodes: [conv2d_13, x_18, x_19], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add] triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21.run(buf66, buf57, buf58, buf55, buf54, primals_29, buf56, buf59, buf62, buf64, 131072, grid=grid(131072), stream=stream0) del buf54 del primals_29 # Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution] buf67 = extern_kernels.convolution(buf66, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf67, (4, 256, 8, 8), (16384, 64, 8, 1)) buf68 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_14, x_20], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_22.run(buf67, primals_31, buf68, 65536, grid=grid(65536), stream=stream0) buf69 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat] triton_poi_fused_cat_23.run(buf68, buf67, primals_31, buf26, buf69, 131072, grid=grid(131072), stream=stream0) del buf67 del primals_31 # Topologically Sorted Source Nodes: [conv2d_15], Original ATen: [aten.convolution] buf70 = extern_kernels.convolution(buf69, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf70, (4, 256, 8, 8), (16384, 64, 8, 1)) buf71 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_15, x_21], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_22.run(buf70, primals_33, buf71, 65536, grid=grid(65536), stream=stream0) buf72 = empty_strided_cuda((16, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [x_22], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_24.run(buf72, 16, grid=grid(16), stream=stream0) buf73 = empty_strided_cuda((16, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_25.run(buf73, 16, grid=grid(16), stream=stream0) buf74 = empty_strided_cuda((16, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp] triton_poi_fused__to_copy_24.run(buf74, 16, grid=grid(16), stream=stream0) buf75 = empty_strided_cuda((16, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_25.run(buf75, 16, grid=grid(16), stream=stream0) buf78 = empty_strided_cuda((16, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp] 
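        # Explanatory note (not generated code): buf72/buf73 hold the low/high
        # source rows, buf74/buf75 the columns, and buf78/buf80 the fractional
        # blend weights for the 8x8 -> 16x16 bilinear upsample of x_22. The
        # arithmetic matches F.interpolate(..., mode='bilinear',
        # align_corners=False):
        #     src = max((dst + 0.5) * 0.5 - 0.5, 0.0)
        #     lo, hi = int(src), min(int(src) + 1, 7)
        #     w = clamp(src - lo, 0.0, 1.0)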
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26.run(buf78, 16, grid=grid(16), stream=stream0) buf80 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.sub, aten.clamp] triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26.run(buf80, 16, grid=grid(16), stream=stream0) buf77 = reinterpret_tensor(buf10, (4, 256, 16, 16), (65536, 256, 16, 1), 0); del buf10 # reuse buf81 = buf77; del buf77 # reuse buf82 = buf81; del buf81 # reuse # Topologically Sorted Source Nodes: [conv2d_15, x_21, x_22], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add] triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27.run(buf82, buf73, buf74, buf71, buf70, primals_33, buf72, buf75, buf78, buf80, 262144, grid=grid(262144), stream=stream0) del primals_33 # Topologically Sorted Source Nodes: [conv2d_16], Original ATen: [aten.convolution] buf83 = extern_kernels.convolution(buf82, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf83, (4, 128, 16, 16), (32768, 256, 16, 1)) buf84 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_16, x_23], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_28.run(buf83, primals_35, buf84, 131072, grid=grid(131072), stream=stream0) buf85 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat] triton_poi_fused_cat_29.run(buf84, buf83, primals_35, buf19, buf85, 262144, grid=grid(262144), stream=stream0) del buf83 del primals_35 # Topologically Sorted Source Nodes: [conv2d_17], Original ATen: [aten.convolution] buf86 = extern_kernels.convolution(buf85, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf86, (4, 128, 16, 16), (32768, 256, 16, 1)) buf87 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_17, x_24], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_28.run(buf86, primals_37, buf87, 131072, grid=grid(131072), stream=stream0) buf88 = empty_strided_cuda((32, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [x_25], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_30.run(buf88, 32, grid=grid(32), stream=stream0) buf89 = empty_strided_cuda((32, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_31.run(buf89, 32, grid=grid(32), stream=stream0) buf90 = empty_strided_cuda((32, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp] triton_poi_fused__to_copy_30.run(buf90, 32, grid=grid(32), stream=stream0) buf91 = empty_strided_cuda((32, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_31.run(buf91, 32, grid=grid(32), stream=stream0) buf94 = empty_strided_cuda((32, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp] triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32.run(buf94, 32, 
grid=grid(32), stream=stream0) buf96 = empty_strided_cuda((32, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.sub, aten.clamp] triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32.run(buf96, 32, grid=grid(32), stream=stream0) buf93 = reinterpret_tensor(buf3, (4, 128, 32, 32), (131072, 1024, 32, 1), 0); del buf3 # reuse buf97 = buf93; del buf93 # reuse buf98 = buf97; del buf97 # reuse # Topologically Sorted Source Nodes: [conv2d_17, x_24, x_25], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add] triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33.run(buf98, buf89, buf90, buf87, buf86, primals_37, buf88, buf91, buf94, buf96, 524288, grid=grid(524288), stream=stream0) del buf86 del primals_37 # Topologically Sorted Source Nodes: [conv2d_18], Original ATen: [aten.convolution] buf99 = extern_kernels.convolution(buf98, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf99, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf100 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_18, x_26], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_34.run(buf99, primals_39, buf100, 262144, grid=grid(262144), stream=stream0) buf101 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat] triton_poi_fused_cat_35.run(buf100, buf99, primals_39, buf12, buf101, 524288, grid=grid(524288), stream=stream0) del buf99 del primals_39 # Topologically Sorted Source Nodes: [conv2d_19], Original ATen: [aten.convolution] buf102 = extern_kernels.convolution(buf101, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf102, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf103 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_19, x_27], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_34.run(buf102, primals_41, buf103, 262144, grid=grid(262144), stream=stream0) buf104 = empty_strided_cuda((64, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [x_28], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_36.run(buf104, 64, grid=grid(64), stream=stream0) buf105 = empty_strided_cuda((64, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_37.run(buf105, 64, grid=grid(64), stream=stream0) buf106 = empty_strided_cuda((64, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp] triton_poi_fused__to_copy_36.run(buf106, 64, grid=grid(64), stream=stream0) buf107 = empty_strided_cuda((64, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_37.run(buf107, 64, grid=grid(64), stream=stream0) buf110 = empty_strided_cuda((64, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp] triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38.run(buf110, 64, grid=grid(64), stream=stream0) buf112 = 
empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.sub, aten.clamp] triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38.run(buf112, 64, grid=grid(64), stream=stream0) buf109 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32) buf113 = buf109; del buf109 # reuse buf114 = buf113; del buf113 # reuse # Topologically Sorted Source Nodes: [conv2d_19, x_27, x_28], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add] triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39.run(buf114, buf105, buf106, buf103, buf102, primals_41, buf104, buf107, buf110, buf112, 1048576, grid=grid(1048576), stream=stream0) del buf102 del primals_41 # Topologically Sorted Source Nodes: [conv2d_20], Original ATen: [aten.convolution] buf115 = extern_kernels.convolution(buf114, primals_42, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf115, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf116 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_20, x_29], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_40.run(buf115, primals_43, buf116, 524288, grid=grid(524288), stream=stream0) buf117 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_4], Original ATen: [aten.cat] triton_poi_fused_cat_41.run(buf116, buf115, primals_43, buf5, buf117, 1048576, grid=grid(1048576), stream=stream0) del primals_43 # Topologically Sorted Source Nodes: [conv2d_21], Original ATen: [aten.convolution] buf118 = extern_kernels.convolution(buf117, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf118, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf119 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) buf120 = buf115; del buf115 # reuse # Topologically Sorted Source Nodes: [conv2d_21, x_30], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_0.run(buf118, primals_45, buf119, buf120, 524288, grid=grid(524288), stream=stream0) del buf118 del primals_45 # Topologically Sorted Source Nodes: [conv2d_22], Original ATen: [aten.convolution] buf121 = extern_kernels.convolution(buf120, primals_46, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf121, (4, 4, 64, 64), (16384, 4096, 64, 1)) buf122 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.bool) buf123 = reinterpret_tensor(buf70, (4, 4, 64, 64), (16384, 4096, 64, 1), 0); del buf70 # reuse # Topologically Sorted Source Nodes: [conv2d_22, x_31], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_42.run(buf121, primals_47, buf122, buf123, 65536, grid=grid(65536), stream=stream0) del buf121 del primals_47 return (buf123, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, buf1, buf2, buf4, buf5, buf6, buf8, buf9, buf11, buf12, buf13, buf15, buf16, buf18, buf19, buf20, buf22, buf23, buf25, buf26, 
buf27, buf29, buf30, buf32, buf33, buf34, buf36, buf37, buf39, buf40, buf41, buf42, buf43, buf46, buf48, buf50, buf52, buf53, buf55, buf56, buf57, buf58, buf59, buf62, buf64, buf66, buf68, buf69, buf71, buf72, buf73, buf74, buf75, buf78, buf80, buf82, buf84, buf85, buf87, buf88, buf89, buf90, buf91, buf94, buf96, buf98, buf100, buf101, buf103, buf104, buf105, buf106, buf107, buf110, buf112, buf114, buf116, buf117, buf119, buf120, buf122, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 4, 9, 9), (324, 81, 9, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 32, 7, 7), (1568, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 32, 5, 5), (800, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((512, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_31 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_32 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_33 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_34 = 
rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_35 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_36 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_37 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_38 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_39 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_40 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_41 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_42 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_43 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_44 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_45 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_46 = rand_strided((4, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_47 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
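# ---------------------------------------------------------------------------
# Reference sketch (not part of the generated module): in eager terms the
# fused kernel triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39
# performs bias-add + leaky_relu(negative_slope=0.1) followed by a 2x bilinear
# upsample; the (dst + 0.5) * 0.5 - 0.5 index math corresponds to
# align_corners=False. The helper below is illustrative only; its name and
# arguments do not appear in the generated code.

def _eager_upsample_leaky_relu(conv_out, bias):
    import torch.nn.functional as F
    # Broadcast the per-channel conv bias, apply leaky_relu, then upsample.
    x = F.leaky_relu(conv_out + bias.view(1, -1, 1, 1), negative_slope=0.1)
    return F.interpolate(x, scale_factor=2, mode='bilinear',
        align_corners=False)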
import torch
import torch.nn as nn
import torch.nn.functional as F


class down(nn.Module):
    """
    A class for creating neural network blocks containing layers:

    Average Pooling --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU

    This is used in the UNet Class to create a UNet-like NN architecture.

    ...

    Methods
    -------
    forward(x)
        Returns output tensor after passing input `x` to the neural network
        block.
    """

    def __init__(self, inChannels, outChannels, filterSize):
        """
        Parameters
        ----------
        inChannels : int
            number of input channels for the first convolutional layer.
        outChannels : int
            number of output channels for the first convolutional layer.
            This is also used as input and output channels for the second
            convolutional layer.
        filterSize : int
            filter size for the convolution filter. input N would create
            an N x N filter.
        """
        super(down, self).__init__()
        self.conv1 = nn.Conv2d(inChannels, outChannels, filterSize,
            stride=1, padding=int((filterSize - 1) / 2))
        self.conv2 = nn.Conv2d(outChannels, outChannels, filterSize,
            stride=1, padding=int((filterSize - 1) / 2))

    def forward(self, x):
        """
        Returns output tensor after passing input `x` to the neural network
        block.

        Parameters
        ----------
        x : tensor
            input to the NN block.

        Returns
        -------
        tensor
            output of the NN block.
        """
        x = F.avg_pool2d(x, 2)
        x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
        x = F.leaky_relu(self.conv2(x), negative_slope=0.1)
        return x


class up(nn.Module):
    """
    A class for creating neural network blocks containing layers:

    Bilinear interpolation --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU

    This is used in the UNet Class to create a UNet-like NN architecture.

    ...

    Methods
    -------
    forward(x, skpCn)
        Returns output tensor after passing inputs `x` and `skpCn` to the
        neural network block.
    """

    def __init__(self, inChannels, outChannels):
        """
        Parameters
        ----------
        inChannels : int
            number of input channels for the first convolutional layer.
        outChannels : int
            number of output channels for the first convolutional layer.
            This is also used for setting input and output channels for
            the second convolutional layer.
        """
        super(up, self).__init__()
        self.conv1 = nn.Conv2d(inChannels, outChannels, 3, stride=1,
            padding=1)
        self.conv2 = nn.Conv2d(2 * outChannels, outChannels, 3, stride=1,
            padding=1)

    def forward(self, x, skpCn):
        """
        Returns output tensor after passing inputs `x` and `skpCn` to the
        neural network block.

        Parameters
        ----------
        x : tensor
            input to the NN block.
        skpCn : tensor
            skip connection input to the NN block.

        Returns
        -------
        tensor
            output of the NN block.
        """
        x = F.interpolate(x, scale_factor=2, mode='bilinear')
        x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
        x = F.leaky_relu(self.conv2(torch.cat((x, skpCn), 1)),
            negative_slope=0.1)
        return x


class UNet(nn.Module):
    """
    A class for creating a UNet architecture with skip connections.

    ...

    Methods
    -------
    forward(x)
        Returns output tensor after passing input `x` to the neural network.
    """

    def __init__(self, inChannels, outChannels):
        """
        Parameters
        ----------
        inChannels : int
            number of input channels for the UNet.
        outChannels : int
            number of output channels for the UNet.
        """
        super(UNet, self).__init__()
        self.conv1 = nn.Conv2d(inChannels, 32, 9, stride=1, padding=4)
        self.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3)
        self.down1 = down(32, 64, 5)
        self.down2 = down(64, 128, 3)
        self.down3 = down(128, 256, 3)
        self.down4 = down(256, 512, 3)
        self.down5 = down(512, 512, 3)
        self.up1 = up(512, 512)
        self.up2 = up(512, 256)
        self.up3 = up(256, 128)
        self.up4 = up(128, 64)
        self.up5 = up(64, 32)
        self.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1)

    def forward(self, x):
        """
        Returns output tensor after passing input `x` to the neural network.

        Parameters
        ----------
        x : tensor
            input to the UNet.

        Returns
        -------
        tensor
            output of the UNet.
        """
        x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
        s1 = F.leaky_relu(self.conv2(x), negative_slope=0.1)
        s2 = self.down1(s1)
        s3 = self.down2(s2)
        s4 = self.down3(s3)
        s5 = self.down4(s4)
        x = self.down5(s5)
        x = self.up1(x, s5)
        x = self.up2(x, s4)
        x = self.up3(x, s3)
        x = self.up4(x, s2)
        x = self.up5(x, s1)
        x = F.leaky_relu(self.conv3(x), negative_slope=0.1)
        return x


def get_inputs():
    return [torch.rand([4, 4, 64, 64])]


def get_init_inputs():
    return [[], {'inChannels': 4, 'outChannels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_avg_pool2d_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_avg_pool2d_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 512 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_avg_pool2d_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), None, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), None, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_10(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4 % 512 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4 % 512 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused__to_copy_12(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_13(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = 
xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = triton_helpers.minimum(tmp10, tmp9) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15( in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4 % 4 x0 = xindex % 4 x6 = xindex // 16 x2 = xindex // 16 % 512 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last') tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 2, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 2 * tmp4 + 4 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp10 = tl.load(in_ptr3 + (tmp8 + 2 * tmp4 + 4 * x6), None, eviction_policy='evict_last') tmp12 = tmp10 + tmp11 tmp13 = 0.1 tmp14 = tmp12 * tmp13 tmp15 = tl.where(tmp9, tmp12, tmp14) tmp17 = tmp16 + tmp1 tmp18 = tmp16 < 0 tmp19 = tl.where(tmp18, tmp17, tmp16) tmp20 = tl.load(in_ptr2 + (tmp8 + 2 * tmp19 + 4 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp21 = tl.load(in_ptr3 + (tmp8 + 2 * tmp19 + 4 * x6), None, eviction_policy='evict_last') tmp22 = tmp21 + tmp11 tmp23 = tmp22 * tmp13 tmp24 = tl.where(tmp20, tmp22, tmp23) tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp28 + 2 * tmp19 + 4 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp30 = tl.load(in_ptr3 + (tmp28 + 2 * tmp19 + 4 * x6), None, eviction_policy='evict_last') tmp31 = tmp30 + tmp11 tmp32 = tmp31 * tmp13 tmp33 = tl.where(tmp29, tmp31, tmp32) tmp34 = tmp33 - tmp24 tmp36 = tmp34 * tmp35 tmp37 = tmp24 + tmp36 tmp38 = tl.load(in_ptr2 + (tmp28 + 2 * tmp4 + 4 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp39 = tl.load(in_ptr3 + (tmp28 + 2 * tmp4 + 4 * x6), None, eviction_policy='evict_last') tmp40 = tmp39 + tmp11 tmp41 = tmp40 * tmp13 tmp42 = tl.where(tmp38, tmp40, tmp41) tmp43 = tmp42 - tmp15 tmp44 = tmp43 * tmp35 tmp45 = tmp15 + tmp44 tmp46 = tmp45 - tmp37 tmp48 = tmp46 * tmp47 tmp49 = tmp37 + tmp48 tl.store(in_out_ptr1 + x4, tmp49, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_16(in_ptr0, 
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 512 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 16 % 1024 x0 = xindex % 16 x2 = xindex // 16384 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 512, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 8192 * x2), tmp4, other=0.0).to(tl .int1) tmp6 = tl.load(in_ptr1 + (x0 + 16 * x1 + 8192 * x2), tmp4, other=0.0) tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = 0.1 tmp10 = tmp8 * tmp9 tmp11 = tl.where(tmp5, tmp8, tmp10) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tl.full([1], 1024, tl.int64) tmp17 = tl.load(in_ptr3 + (x0 + 16 * (-512 + x1) + 8192 * x2), tmp14, other=0.0) tmp18 = tl.where(tmp4, tmp13, tmp17) tl.store(out_ptr0 + x3, tmp18, None) @triton.jit def triton_poi_fused__to_copy_18(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_19(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 3, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21( in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 8 % 8 x0 = xindex % 8 x6 = xindex // 64 x2 = xindex // 64 % 512 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + 
x2, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last') tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 4 * tmp4 + 16 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp10 = tl.load(in_ptr3 + (tmp8 + 4 * tmp4 + 16 * x6), None, eviction_policy='evict_last') tmp12 = tmp10 + tmp11 tmp13 = 0.1 tmp14 = tmp12 * tmp13 tmp15 = tl.where(tmp9, tmp12, tmp14) tmp17 = tmp16 + tmp1 tmp18 = tmp16 < 0 tmp19 = tl.where(tmp18, tmp17, tmp16) tmp20 = tl.load(in_ptr2 + (tmp8 + 4 * tmp19 + 16 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp21 = tl.load(in_ptr3 + (tmp8 + 4 * tmp19 + 16 * x6), None, eviction_policy='evict_last') tmp22 = tmp21 + tmp11 tmp23 = tmp22 * tmp13 tmp24 = tl.where(tmp20, tmp22, tmp23) tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp28 + 4 * tmp19 + 16 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp30 = tl.load(in_ptr3 + (tmp28 + 4 * tmp19 + 16 * x6), None, eviction_policy='evict_last') tmp31 = tmp30 + tmp11 tmp32 = tmp31 * tmp13 tmp33 = tl.where(tmp29, tmp31, tmp32) tmp34 = tmp33 - tmp24 tmp36 = tmp34 * tmp35 tmp37 = tmp24 + tmp36 tmp38 = tl.load(in_ptr2 + (tmp28 + 4 * tmp4 + 16 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp39 = tl.load(in_ptr3 + (tmp28 + 4 * tmp4 + 16 * x6), None, eviction_policy='evict_last') tmp40 = tmp39 + tmp11 tmp41 = tmp40 * tmp13 tmp42 = tl.where(tmp38, tmp40, tmp41) tmp43 = tmp42 - tmp15 tmp44 = tmp43 * tmp35 tmp45 = tmp15 + tmp44 tmp46 = tmp45 - tmp37 tmp48 = tmp46 * tmp47 tmp49 = tmp37 + tmp48 tl.store(in_out_ptr1 + x4, tmp49, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_22(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 512 x0 = xindex % 64 x2 = xindex // 32768 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 256, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 16384 * x2), tmp4, other=0.0).to( tl.int1) tmp6 = tl.load(in_ptr1 + (x0 + 64 * x1 + 16384 * x2), tmp4, other=0.0) tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = 0.1 tmp10 = tmp8 * tmp9 tmp11 = tl.where(tmp5, tmp8, tmp10) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tl.full([1], 512, tl.int64) tmp17 = tl.load(in_ptr3 + (x0 + 64 * (-256 + x1) + 16384 * x2), tmp14, other=0.0) tmp18 = tl.where(tmp4, tmp13, tmp17) tl.store(out_ptr0 + x3, tmp18, None) @triton.jit def triton_poi_fused__to_copy_24(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_25(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 7, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27( in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 16 % 16 x0 = xindex % 16 x6 = xindex // 256 x2 = xindex // 256 % 256 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last') tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 8, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 8 * tmp4 + 64 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp10 = tl.load(in_ptr3 + (tmp8 + 8 * tmp4 + 64 * x6), None, eviction_policy='evict_last') tmp12 = tmp10 + tmp11 tmp13 = 0.1 tmp14 = tmp12 * tmp13 tmp15 = tl.where(tmp9, tmp12, tmp14) tmp17 = tmp16 + tmp1 tmp18 = tmp16 < 0 tmp19 = tl.where(tmp18, tmp17, tmp16) tmp20 = tl.load(in_ptr2 + (tmp8 + 8 * tmp19 + 64 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp21 = tl.load(in_ptr3 + (tmp8 + 8 * tmp19 + 64 * x6), None, eviction_policy='evict_last') tmp22 = tmp21 + tmp11 tmp23 = tmp22 * tmp13 tmp24 = tl.where(tmp20, tmp22, tmp23) tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp28 + 8 * tmp19 + 64 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp30 = tl.load(in_ptr3 + (tmp28 + 8 * tmp19 + 64 * x6), None, eviction_policy='evict_last') tmp31 = tmp30 + tmp11 tmp32 = tmp31 * tmp13 tmp33 = tl.where(tmp29, tmp31, tmp32) 
tmp34 = tmp33 - tmp24 tmp36 = tmp34 * tmp35 tmp37 = tmp24 + tmp36 tmp38 = tl.load(in_ptr2 + (tmp28 + 8 * tmp4 + 64 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp39 = tl.load(in_ptr3 + (tmp28 + 8 * tmp4 + 64 * x6), None, eviction_policy='evict_last') tmp40 = tmp39 + tmp11 tmp41 = tmp40 * tmp13 tmp42 = tl.where(tmp38, tmp40, tmp41) tmp43 = tmp42 - tmp15 tmp44 = tmp43 * tmp35 tmp45 = tmp15 + tmp44 tmp46 = tmp45 - tmp37 tmp48 = tmp46 * tmp47 tmp49 = tmp37 + tmp48 tl.store(in_out_ptr1 + x4, tmp49, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_28(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_cat_29(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 256 % 256 x0 = xindex % 256 x2 = xindex // 65536 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 256 * x1 + 32768 * x2), tmp4, other=0.0).to( tl.int1) tmp6 = tl.load(in_ptr1 + (x0 + 256 * x1 + 32768 * x2), tmp4, other=0.0) tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = 0.1 tmp10 = tmp8 * tmp9 tmp11 = tl.where(tmp5, tmp8, tmp10) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tl.full([1], 256, tl.int64) tmp17 = tl.load(in_ptr3 + (x0 + 256 * (-128 + x1) + 32768 * x2), tmp14, other=0.0) tmp18 = tl.where(tmp4, tmp13, tmp17) tl.store(out_ptr0 + x3, tmp18, None) @triton.jit def triton_poi_fused__to_copy_30(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_31(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 15, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) 
tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33( in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 32 % 32 x0 = xindex % 32 x6 = xindex // 1024 x2 = xindex // 1024 % 128 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last') tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 16, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 16 * tmp4 + 256 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp10 = tl.load(in_ptr3 + (tmp8 + 16 * tmp4 + 256 * x6), None, eviction_policy='evict_last') tmp12 = tmp10 + tmp11 tmp13 = 0.1 tmp14 = tmp12 * tmp13 tmp15 = tl.where(tmp9, tmp12, tmp14) tmp17 = tmp16 + tmp1 tmp18 = tmp16 < 0 tmp19 = tl.where(tmp18, tmp17, tmp16) tmp20 = tl.load(in_ptr2 + (tmp8 + 16 * tmp19 + 256 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp21 = tl.load(in_ptr3 + (tmp8 + 16 * tmp19 + 256 * x6), None, eviction_policy='evict_last') tmp22 = tmp21 + tmp11 tmp23 = tmp22 * tmp13 tmp24 = tl.where(tmp20, tmp22, tmp23) tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp28 + 16 * tmp19 + 256 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp30 = tl.load(in_ptr3 + (tmp28 + 16 * tmp19 + 256 * x6), None, eviction_policy='evict_last') tmp31 = tmp30 + tmp11 tmp32 = tmp31 * tmp13 tmp33 = tl.where(tmp29, tmp31, tmp32) tmp34 = tmp33 - tmp24 tmp36 = tmp34 * tmp35 tmp37 = tmp24 + tmp36 tmp38 = tl.load(in_ptr2 + (tmp28 + 16 * tmp4 + 256 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp39 = tl.load(in_ptr3 + (tmp28 + 16 * tmp4 + 256 * x6), None, eviction_policy='evict_last') tmp40 = tmp39 + tmp11 tmp41 = tmp40 * tmp13 tmp42 = tl.where(tmp38, tmp40, tmp41) tmp43 = tmp42 - tmp15 tmp44 = tmp43 * tmp35 tmp45 = tmp15 + tmp44 tmp46 = tmp45 - tmp37 tmp48 = tmp46 * tmp47 tmp49 = tmp37 + tmp48 tl.store(in_out_ptr1 + x4, tmp49, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_34(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_cat_35(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 1024 % 128 x0 = xindex % 1024 x2 = xindex // 131072 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 65536 * x2), tmp4, other=0.0 
).to(tl.int1) tmp6 = tl.load(in_ptr1 + (x0 + 1024 * x1 + 65536 * x2), tmp4, other=0.0) tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = 0.1 tmp10 = tmp8 * tmp9 tmp11 = tl.where(tmp5, tmp8, tmp10) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp17 = tl.load(in_ptr3 + (x0 + 1024 * (-64 + x1) + 65536 * x2), tmp14, other=0.0) tmp18 = tl.where(tmp4, tmp13, tmp17) tl.store(out_ptr0 + x3, tmp18, None) @triton.jit def triton_poi_fused__to_copy_36(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_37(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 31, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39( in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 64 x0 = xindex % 64 x6 = xindex // 4096 x2 = xindex // 4096 % 64 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last') tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 32, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 32 * tmp4 + 1024 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp10 = tl.load(in_ptr3 + (tmp8 + 32 * tmp4 + 1024 * x6), None, eviction_policy='evict_last') tmp12 = tmp10 + tmp11 tmp13 = 0.1 tmp14 = tmp12 * tmp13 tmp15 = tl.where(tmp9, tmp12, tmp14) tmp17 = tmp16 + tmp1 tmp18 = tmp16 < 0 tmp19 = 
tl.where(tmp18, tmp17, tmp16) tmp20 = tl.load(in_ptr2 + (tmp8 + 32 * tmp19 + 1024 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp21 = tl.load(in_ptr3 + (tmp8 + 32 * tmp19 + 1024 * x6), None, eviction_policy='evict_last') tmp22 = tmp21 + tmp11 tmp23 = tmp22 * tmp13 tmp24 = tl.where(tmp20, tmp22, tmp23) tmp26 = tmp25 + tmp1 tmp27 = tmp25 < 0 tmp28 = tl.where(tmp27, tmp26, tmp25) tmp29 = tl.load(in_ptr2 + (tmp28 + 32 * tmp19 + 1024 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp30 = tl.load(in_ptr3 + (tmp28 + 32 * tmp19 + 1024 * x6), None, eviction_policy='evict_last') tmp31 = tmp30 + tmp11 tmp32 = tmp31 * tmp13 tmp33 = tl.where(tmp29, tmp31, tmp32) tmp34 = tmp33 - tmp24 tmp36 = tmp34 * tmp35 tmp37 = tmp24 + tmp36 tmp38 = tl.load(in_ptr2 + (tmp28 + 32 * tmp4 + 1024 * x6), None, eviction_policy='evict_last').to(tl.int1) tmp39 = tl.load(in_ptr3 + (tmp28 + 32 * tmp4 + 1024 * x6), None, eviction_policy='evict_last') tmp40 = tmp39 + tmp11 tmp41 = tmp40 * tmp13 tmp42 = tl.where(tmp38, tmp40, tmp41) tmp43 = tmp42 - tmp15 tmp44 = tmp43 * tmp35 tmp45 = tmp15 + tmp44 tmp46 = tmp45 - tmp37 tmp48 = tmp46 * tmp47 tmp49 = tmp37 + tmp48 tl.store(in_out_ptr1 + x4, tmp49, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_40(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tl.store(out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_cat_41(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 64 x0 = xindex % 4096 x2 = xindex // 262144 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0 ).to(tl.int1) tmp6 = tl.load(in_ptr1 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0) tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = 0.1 tmp10 = tmp8 * tmp9 tmp11 = tl.where(tmp5, tmp8, tmp10) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp4, tmp11, tmp12) tmp14 = tmp0 >= tmp3 tl.full([1], 64, tl.int64) tmp17 = tl.load(in_ptr3 + (x0 + 4096 * (-32 + x1) + 131072 * x2), tmp14, other=0.0) tmp18 = tl.where(tmp4, tmp13, tmp17) tl.store(out_ptr0 + x3, tmp18, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_42(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 4 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, 
primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47) = args args.clear() assert_size_stride(primals_1, (32, 4, 9, 9), (324, 81, 9, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (32, 32, 7, 7), (1568, 49, 7, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32, 5, 5), (800, 25, 5, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (256,), (1,)) assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_19, (512,), (1,)) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (512,), (1,)) assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512,), (1,)) assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (512,), (1,)) assert_size_stride(primals_28, (512, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_29, (512,), (1,)) assert_size_stride(primals_30, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_31, (256,), (1,)) assert_size_stride(primals_32, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_33, (256,), (1,)) assert_size_stride(primals_34, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_35, (128,), (1,)) assert_size_stride(primals_36, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_37, (128,), (1,)) assert_size_stride(primals_38, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_39, (64,), (1,)) assert_size_stride(primals_40, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_41, (64,), (1,)) assert_size_stride(primals_42, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_43, (32,), (1,)) assert_size_stride(primals_44, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_45, (32,), (1,)) assert_size_stride(primals_46, (4, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_47, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf0, primals_2, buf1, buf2, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(3, 3), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf4 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) buf5 = buf0 del buf0 triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf3, primals_5, buf4, buf5, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32) triton_poi_fused_avg_pool2d_1[grid(131072)](buf5, buf6, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf8 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_2[grid(262144)](buf7, primals_7, buf8, buf9, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf11 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) buf12 = buf7 del buf7 triton_poi_fused_convolution_leaky_relu_2[grid(262144)](buf10, primals_9, buf11, buf12, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_9 buf13 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32) triton_poi_fused_avg_pool2d_3[grid(65536)](buf12, buf13, 65536, XBLOCK=256, num_warps=4, num_stages=1) buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 128, 16, 16), (32768, 256, 16, 1)) buf15 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) buf16 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_4[grid(131072)](buf14, primals_11, buf15, buf16, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_11 buf17 = extern_kernels.convolution(buf16, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 128, 16, 16), (32768, 256, 16, 1)) buf18 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) buf19 = buf14 del buf14 triton_poi_fused_convolution_leaky_relu_4[grid(131072)](buf17, primals_13, buf18, buf19, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_13 buf20 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch. 
float32) triton_poi_fused_avg_pool2d_5[grid(32768)](buf19, buf20, 32768, XBLOCK=128, num_warps=4, num_stages=1) buf21 = extern_kernels.convolution(buf20, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 256, 8, 8), (16384, 64, 8, 1)) buf22 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .bool) buf23 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .float32) triton_poi_fused_convolution_leaky_relu_6[grid(65536)](buf21, primals_15, buf22, buf23, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf24 = extern_kernels.convolution(buf23, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 256, 8, 8), (16384, 64, 8, 1)) buf25 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .bool) buf26 = buf21 del buf21 triton_poi_fused_convolution_leaky_relu_6[grid(65536)](buf24, primals_17, buf25, buf26, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_17 buf27 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch. float32) triton_poi_fused_avg_pool2d_7[grid(16384)](buf26, buf27, 16384, XBLOCK=128, num_warps=4, num_stages=1) buf28 = extern_kernels.convolution(buf27, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 512, 4, 4), (8192, 16, 4, 1)) buf29 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool ) buf30 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch. float32) triton_poi_fused_convolution_leaky_relu_8[grid(32768)](buf28, primals_19, buf29, buf30, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_19 buf31 = extern_kernels.convolution(buf30, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf31, (4, 512, 4, 4), (8192, 16, 4, 1)) buf32 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool ) buf33 = buf28 del buf28 triton_poi_fused_convolution_leaky_relu_8[grid(32768)](buf31, primals_21, buf32, buf33, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_21 buf34 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch. float32) triton_poi_fused_avg_pool2d_9[grid(8192)](buf33, buf34, 8192, XBLOCK=128, num_warps=4, num_stages=1) buf35 = extern_kernels.convolution(buf34, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 512, 2, 2), (2048, 4, 2, 1)) buf36 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.bool) buf37 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch. 
float32) triton_poi_fused_convolution_leaky_relu_10[grid(8192)](buf35, primals_23, buf36, buf37, 8192, XBLOCK=256, num_warps=4, num_stages=1) del buf35 del primals_23 buf38 = extern_kernels.convolution(buf37, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 512, 2, 2), (2048, 4, 2, 1)) buf39 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_11[grid(8192)](buf38, primals_25, buf39, 8192, XBLOCK=128, num_warps=4, num_stages=1) buf40 = empty_strided_cuda((4, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_12[grid(4)](buf40, 4, XBLOCK=4, num_warps =1, num_stages=1) buf41 = empty_strided_cuda((4, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_13[grid(4)](buf41, 4, XBLOCK=4, num_warps=1, num_stages=1) buf42 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused__to_copy_12[grid(4)](buf42, 4, XBLOCK=4, num_warps =1, num_stages=1) buf43 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused_add_clamp_13[grid(4)](buf43, 4, XBLOCK=4, num_warps=1, num_stages=1) buf46 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14[grid(4)](buf46, 4, XBLOCK=4, num_warps=1, num_stages=1) buf48 = empty_strided_cuda((4, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14[grid(4)](buf48, 4, XBLOCK=4, num_warps=1, num_stages=1) buf45 = buf31 del buf31 buf49 = buf45 del buf45 buf50 = buf49 del buf49 triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15[ grid(32768)](buf50, buf41, buf42, buf39, buf38, primals_25, buf40, buf43, buf46, buf48, 32768, XBLOCK=128, num_warps=4, num_stages=1) del buf38 del primals_25 buf51 = extern_kernels.convolution(buf50, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf51, (4, 512, 4, 4), (8192, 16, 4, 1)) buf52 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool ) triton_poi_fused_convolution_leaky_relu_16[grid(32768)](buf51, primals_27, buf52, 32768, XBLOCK=128, num_warps=4, num_stages=1) buf53 = reinterpret_tensor(buf24, (4, 1024, 4, 4), (16384, 16, 4, 1), 0 ) del buf24 triton_poi_fused_cat_17[grid(65536)](buf52, buf51, primals_27, buf33, buf53, 65536, XBLOCK=256, num_warps=4, num_stages=1) del buf51 del primals_27 buf54 = extern_kernels.convolution(buf53, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf54, (4, 512, 4, 4), (8192, 16, 4, 1)) buf55 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool ) triton_poi_fused_convolution_leaky_relu_16[grid(32768)](buf54, primals_29, buf55, 32768, XBLOCK=128, num_warps=4, num_stages=1) buf56 = empty_strided_cuda((8, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_18[grid(8)](buf56, 8, XBLOCK=8, num_warps =1, num_stages=1) buf57 = empty_strided_cuda((8, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_19[grid(8)](buf57, 8, XBLOCK=8, num_warps=1, num_stages=1) buf58 = empty_strided_cuda((8,), (1,), torch.int64) triton_poi_fused__to_copy_18[grid(8)](buf58, 8, XBLOCK=8, num_warps =1, num_stages=1) buf59 = empty_strided_cuda((8,), (1,), torch.int64) triton_poi_fused_add_clamp_19[grid(8)](buf59, 8, XBLOCK=8, num_warps=1, num_stages=1) buf62 = empty_strided_cuda((8,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20[grid(8)](buf62, 8, 
XBLOCK=8, num_warps=1, num_stages=1) buf64 = empty_strided_cuda((8, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20[grid(8)](buf64, 8, XBLOCK=8, num_warps=1, num_stages=1) buf61 = reinterpret_tensor(buf17, (4, 512, 8, 8), (32768, 64, 8, 1), 0) del buf17 buf65 = buf61 del buf61 buf66 = buf65 del buf65 triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21[ grid(131072)](buf66, buf57, buf58, buf55, buf54, primals_29, buf56, buf59, buf62, buf64, 131072, XBLOCK=512, num_warps=8, num_stages=1) del buf54 del primals_29 buf67 = extern_kernels.convolution(buf66, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf67, (4, 256, 8, 8), (16384, 64, 8, 1)) buf68 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .bool) triton_poi_fused_convolution_leaky_relu_22[grid(65536)](buf67, primals_31, buf68, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf69 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch .float32) triton_poi_fused_cat_23[grid(131072)](buf68, buf67, primals_31, buf26, buf69, 131072, XBLOCK=512, num_warps=8, num_stages=1) del buf67 del primals_31 buf70 = extern_kernels.convolution(buf69, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf70, (4, 256, 8, 8), (16384, 64, 8, 1)) buf71 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .bool) triton_poi_fused_convolution_leaky_relu_22[grid(65536)](buf70, primals_33, buf71, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf72 = empty_strided_cuda((16, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_24[grid(16)](buf72, 16, XBLOCK=16, num_warps=1, num_stages=1) buf73 = empty_strided_cuda((16, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_25[grid(16)](buf73, 16, XBLOCK=16, num_warps=1, num_stages=1) buf74 = empty_strided_cuda((16,), (1,), torch.int64) triton_poi_fused__to_copy_24[grid(16)](buf74, 16, XBLOCK=16, num_warps=1, num_stages=1) buf75 = empty_strided_cuda((16,), (1,), torch.int64) triton_poi_fused_add_clamp_25[grid(16)](buf75, 16, XBLOCK=16, num_warps=1, num_stages=1) buf78 = empty_strided_cuda((16,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26[grid(16)](buf78, 16, XBLOCK=16, num_warps=1, num_stages=1) buf80 = empty_strided_cuda((16, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26[grid(16)](buf80, 16, XBLOCK=16, num_warps=1, num_stages=1) buf77 = reinterpret_tensor(buf10, (4, 256, 16, 16), (65536, 256, 16, 1), 0) del buf10 buf81 = buf77 del buf77 buf82 = buf81 del buf81 triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27[ grid(262144)](buf82, buf73, buf74, buf71, buf70, primals_33, buf72, buf75, buf78, buf80, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_33 buf83 = extern_kernels.convolution(buf82, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf83, (4, 128, 16, 16), (32768, 256, 16, 1)) buf84 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_28[grid(131072)](buf83, primals_35, buf84, 131072, XBLOCK=1024, num_warps=4, num_stages=1) buf85 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.float32) triton_poi_fused_cat_29[grid(262144)](buf84, buf83, primals_35, buf19, buf85, 262144, XBLOCK=512, num_warps=8, 
num_stages=1) del buf83 del primals_35 buf86 = extern_kernels.convolution(buf85, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf86, (4, 128, 16, 16), (32768, 256, 16, 1)) buf87 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_28[grid(131072)](buf86, primals_37, buf87, 131072, XBLOCK=1024, num_warps=4, num_stages=1) buf88 = empty_strided_cuda((32, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_30[grid(32)](buf88, 32, XBLOCK=32, num_warps=1, num_stages=1) buf89 = empty_strided_cuda((32, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_31[grid(32)](buf89, 32, XBLOCK=32, num_warps=1, num_stages=1) buf90 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused__to_copy_30[grid(32)](buf90, 32, XBLOCK=32, num_warps=1, num_stages=1) buf91 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused_add_clamp_31[grid(32)](buf91, 32, XBLOCK=32, num_warps=1, num_stages=1) buf94 = empty_strided_cuda((32,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32[grid(32)](buf94, 32, XBLOCK=32, num_warps=1, num_stages=1) buf96 = empty_strided_cuda((32, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32[grid(32)](buf96, 32, XBLOCK=32, num_warps=1, num_stages=1) buf93 = reinterpret_tensor(buf3, (4, 128, 32, 32), (131072, 1024, 32, 1), 0) del buf3 buf97 = buf93 del buf93 buf98 = buf97 del buf97 triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33[ grid(524288)](buf98, buf89, buf90, buf87, buf86, primals_37, buf88, buf91, buf94, buf96, 524288, XBLOCK=512, num_warps=8, num_stages=1) del buf86 del primals_37 buf99 = extern_kernels.convolution(buf98, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf99, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf100 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_34[grid(262144)](buf99, primals_39, buf100, 262144, XBLOCK=1024, num_warps=4, num_stages=1) buf101 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.float32) triton_poi_fused_cat_35[grid(524288)](buf100, buf99, primals_39, buf12, buf101, 524288, XBLOCK=512, num_warps=8, num_stages=1) del buf99 del primals_39 buf102 = extern_kernels.convolution(buf101, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf102, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf103 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_34[grid(262144)](buf102, primals_41, buf103, 262144, XBLOCK=1024, num_warps=4, num_stages=1) buf104 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_36[grid(64)](buf104, 64, XBLOCK=64, num_warps=1, num_stages=1) buf105 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_37[grid(64)](buf105, 64, XBLOCK=64, num_warps=1, num_stages=1) buf106 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_36[grid(64)](buf106, 64, XBLOCK=64, num_warps=1, num_stages=1) buf107 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_add_clamp_37[grid(64)](buf107, 64, XBLOCK=64, num_warps=1, num_stages=1) buf110 = empty_strided_cuda((64,), (1,), torch.float32) 
    triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38[grid(64)](buf110,
        64, XBLOCK=64, num_warps=1, num_stages=1)
    buf112 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
    triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38[grid(64)](buf112,
        64, XBLOCK=64, num_warps=1, num_stages=1)
    buf109 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
        torch.float32)
    buf113 = buf109
    del buf109
    buf114 = buf113
    del buf113
    triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39[
        grid(1048576)](buf114, buf105, buf106, buf103, buf102,
        primals_41, buf104, buf107, buf110, buf112, 1048576, XBLOCK=1024,
        num_warps=4, num_stages=1)
    del buf102
    del primals_41
    buf115 = extern_kernels.convolution(buf114, primals_42, stride=(1, 1),
        padding=(1, 1), dilation=(1, 1), transposed=False,
        output_padding=(0, 0), groups=1, bias=None)
    assert_size_stride(buf115, (4, 32, 64, 64), (131072, 4096, 64, 1))
    buf116 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
        torch.bool)
    triton_poi_fused_convolution_leaky_relu_40[grid(524288)](buf115,
        primals_43, buf116, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
    buf117 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
        torch.float32)
    triton_poi_fused_cat_41[grid(1048576)](buf116, buf115, primals_43,
        buf5, buf117, 1048576, XBLOCK=512, num_warps=8, num_stages=1)
    del primals_43
    buf118 = extern_kernels.convolution(buf117, primals_44, stride=(1, 1),
        padding=(1, 1), dilation=(1, 1), transposed=False,
        output_padding=(0, 0), groups=1, bias=None)
    assert_size_stride(buf118, (4, 32, 64, 64), (131072, 4096, 64, 1))
    buf119 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
        torch.bool)
    buf120 = buf115
    del buf115
    triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf118,
        primals_45, buf119, buf120, 524288, XBLOCK=1024, num_warps=4,
        num_stages=1)
    del buf118
    del primals_45
    buf121 = extern_kernels.convolution(buf120, primals_46, stride=(1, 1),
        padding=(1, 1), dilation=(1, 1), transposed=False,
        output_padding=(0, 0), groups=1, bias=None)
    assert_size_stride(buf121, (4, 4, 64, 64), (16384, 4096, 64, 1))
    buf122 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
        torch.bool)
    buf123 = reinterpret_tensor(buf70, (4, 4, 64, 64), (16384, 4096, 64,
        1), 0)
    del buf70
    triton_poi_fused_convolution_leaky_relu_42[grid(65536)](buf121,
        primals_47, buf122, buf123, 65536, XBLOCK=512, num_warps=4,
        num_stages=1)
    del buf121
    del primals_47
    return (buf123, primals_1, primals_3, primals_4, primals_6, primals_8,
        primals_10, primals_12, primals_14, primals_16, primals_18,
        primals_20, primals_22, primals_24, primals_26, primals_28,
        primals_30, primals_32, primals_34, primals_36, primals_38,
        primals_40, primals_42, primals_44, primals_46, buf1, buf2, buf4,
        buf5, buf6, buf8, buf9, buf11, buf12, buf13, buf15, buf16, buf18,
        buf19, buf20, buf22, buf23, buf25, buf26, buf27, buf29, buf30,
        buf32, buf33, buf34, buf36, buf37, buf39, buf40, buf41, buf42,
        buf43, buf46, buf48, buf50, buf52, buf53, buf55, buf56, buf57,
        buf58, buf59, buf62, buf64, buf66, buf68, buf69, buf71, buf72,
        buf73, buf74, buf75, buf78, buf80, buf82, buf84, buf85, buf87,
        buf88, buf89, buf90, buf91, buf94, buf96, buf98, buf100, buf101,
        buf103, buf104, buf105, buf106, buf107, buf110, buf112, buf114,
        buf116, buf117, buf119, buf120, buf122)


class down(nn.Module):
    """
    A class for creating neural network blocks containing layers:

    Average Pooling --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU

    This is used in the UNet Class to create a UNet like NN architecture.
    ...
    Methods
    -------
    forward(x)
        Returns output tensor after passing input `x` to the neural network
        block.
    """

    def __init__(self, inChannels, outChannels, filterSize):
        """
        Parameters
        ----------
        inChannels : int
            number of input channels for the first convolutional layer.
        outChannels : int
            number of output channels for the first convolutional layer.
            This is also used as input and output channels for the
            second convolutional layer.
        filterSize : int
            filter size for the convolution filter. Input N would create
            an N x N filter.
        """
        super(down, self).__init__()
        self.conv1 = nn.Conv2d(inChannels, outChannels, filterSize,
            stride=1, padding=int((filterSize - 1) / 2))
        self.conv2 = nn.Conv2d(outChannels, outChannels, filterSize,
            stride=1, padding=int((filterSize - 1) / 2))

    def forward(self, x):
        """
        Returns output tensor after passing input `x` to the neural network
        block.

        Parameters
        ----------
        x : tensor
            input to the NN block.

        Returns
        -------
        tensor
            output of the NN block.
        """
        x = F.avg_pool2d(x, 2)
        x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
        x = F.leaky_relu(self.conv2(x), negative_slope=0.1)
        return x


class up(nn.Module):
    """
    A class for creating neural network blocks containing layers:

    Bilinear interpolation --> Convolution + Leaky ReLU --> Convolution +
    Leaky ReLU

    This is used in the UNet Class to create a UNet like NN architecture.
    ...

    Methods
    -------
    forward(x, skpCn)
        Returns output tensor after passing input `x` to the neural network
        block.
    """

    def __init__(self, inChannels, outChannels):
        """
        Parameters
        ----------
        inChannels : int
            number of input channels for the first convolutional layer.
        outChannels : int
            number of output channels for the first convolutional layer.
            This is also used for setting input and output channels for
            the second convolutional layer.
        """
        super(up, self).__init__()
        self.conv1 = nn.Conv2d(inChannels, outChannels, 3, stride=1,
            padding=1)
        self.conv2 = nn.Conv2d(2 * outChannels, outChannels, 3, stride=1,
            padding=1)

    def forward(self, x, skpCn):
        """
        Returns output tensor after passing input `x` to the neural network
        block.

        Parameters
        ----------
        x : tensor
            input to the NN block.
        skpCn : tensor
            skip connection input to the NN block.

        Returns
        -------
        tensor
            output of the NN block.
        """
        x = F.interpolate(x, scale_factor=2, mode='bilinear')
        x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
        x = F.leaky_relu(self.conv2(torch.cat((x, skpCn), 1)),
            negative_slope=0.1)
        return x


class UNetNew(nn.Module):
    """
    A class for creating UNet architecture with skip connections.
    ...

    Methods
    -------
    forward(x)
        Returns output tensor after passing input `x` to the neural network
        block.
    """

    def __init__(self, inChannels, outChannels):
        """
        Parameters
        ----------
        inChannels : int
            number of input channels for the UNet.
        outChannels : int
            number of output channels for the UNet.
""" super(UNetNew, self).__init__() self.conv1 = nn.Conv2d(inChannels, 32, 9, stride=1, padding=4) self.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3) self.down1 = down(32, 64, 5) self.down2 = down(64, 128, 3) self.down3 = down(128, 256, 3) self.down4 = down(256, 512, 3) self.down5 = down(512, 512, 3) self.up1 = up(512, 512) self.up2 = up(512, 256) self.up3 = up(256, 128) self.up4 = up(128, 64) self.up5 = up(64, 32) self.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.down1.conv1.weight primals_7 = self.down1.conv1.bias primals_8 = self.down1.conv2.weight primals_9 = self.down1.conv2.bias primals_10 = self.down2.conv1.weight primals_11 = self.down2.conv1.bias primals_12 = self.down2.conv2.weight primals_13 = self.down2.conv2.bias primals_14 = self.down3.conv1.weight primals_15 = self.down3.conv1.bias primals_16 = self.down3.conv2.weight primals_17 = self.down3.conv2.bias primals_18 = self.down4.conv1.weight primals_19 = self.down4.conv1.bias primals_20 = self.down4.conv2.weight primals_21 = self.down4.conv2.bias primals_22 = self.down5.conv1.weight primals_23 = self.down5.conv1.bias primals_24 = self.down5.conv2.weight primals_25 = self.down5.conv2.bias primals_26 = self.up1.conv1.weight primals_27 = self.up1.conv1.bias primals_28 = self.up1.conv2.weight primals_29 = self.up1.conv2.bias primals_30 = self.up2.conv1.weight primals_31 = self.up2.conv1.bias primals_32 = self.up2.conv2.weight primals_33 = self.up2.conv2.bias primals_34 = self.up3.conv1.weight primals_35 = self.up3.conv1.bias primals_36 = self.up3.conv2.weight primals_37 = self.up3.conv2.bias primals_38 = self.up4.conv1.weight primals_39 = self.up4.conv1.bias primals_40 = self.up4.conv2.weight primals_41 = self.up4.conv2.bias primals_42 = self.up5.conv1.weight primals_43 = self.up5.conv1.bias primals_44 = self.up5.conv2.weight primals_45 = self.up5.conv2.bias primals_46 = self.conv3.weight primals_47 = self.conv3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47]) return output[0]
avinashpaliwal/Deep-SloMo
UNet
false
15,057
[ "MIT" ]
76
93373aa3cb9fd384fbf905e235fe6eb4f9cac780
https://github.com/avinashpaliwal/Deep-SloMo/tree/93373aa3cb9fd384fbf905e235fe6eb4f9cac780
NormMLP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/6q/c6q46q7lsepa4jw5qgcgbc5kiud5wm57hubk6vfo4gk47vl2tprk.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/6n/c6nwltytpo33ssumvxlcryrpvlql2hsjrmxl624j4dkkjxt5qgkm.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mn/cmntyljhuirhsdjg2yosgzllpkpxqedxgoyk6gunquq2rf3kl7u5.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = 
call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_4), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_5), kwargs = {}) triton_poi_fused_native_layer_norm_2 = async_compile.triton('triton_poi_fused_native_layer_norm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = 
empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(buf1, buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_2.run(buf1, buf2, buf3, primals_4, primals_5, buf4, 256, grid=grid(256), stream=stream0) del buf2 del buf3 del primals_5 return (buf4, primals_4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class NormMLP(nn.Module): def __init__(self, input_size, output_size): super(NormMLP, self).__init__() self.linear = nn.Linear(input_size, output_size) self.layer_norm = nn.LayerNorm(output_size) def forward(self, activations): return self.layer_norm(self.linear(F.relu(activations))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4}]
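A small eager-mode check of the forward pass above; nothing here is part of the module, it just restates the composition relu -> linear -> layer_norm using the module's own submodules.

# Sanity check: NormMLP.forward(x) == layer_norm(linear(relu(x))).
import torch
import torch.nn.functional as F

mlp = NormMLP(input_size=4, output_size=4)
x = torch.rand(4, 4, 4, 4)
ref = mlp.layer_norm(mlp.linear(F.relu(x)))
assert torch.allclose(mlp(x), ref)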
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), 
torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_2[grid(256)](buf1, buf2, buf3, primals_4, primals_5, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 del primals_5 return buf4, primals_4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf1 class NormMLPNew(nn.Module): def __init__(self, input_size, output_size): super(NormMLPNew, self).__init__() self.linear = nn.Linear(input_size, output_size) self.layer_norm = nn.LayerNorm(output_size) def forward(self, input_0): primals_2 = self.linear.weight primals_3 = self.linear.bias primals_4 = self.layer_norm.weight primals_5 = self.layer_norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
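To make the buffer shapes in call() easier to follow, here is a plain-PyTorch restatement of the fused path (an editorial sketch with made-up weights, not generated code): the (4, 4, 4, 4) activation is viewed as (64, 4) for the fused bias+matmul, kernel 1 produces the per-row mean and rsqrt (buf2/buf3), and kernel 2 applies the normalization plus the LayerNorm affine parameters (buf4).

# Plain-PyTorch equivalent of the fused path above.
import torch

x = torch.rand(4, 4, 4, 4)
w, b = torch.rand(4, 4), torch.rand(4)    # linear weight/bias (primals_2/3)
g, beta = torch.rand(4), torch.rand(4)    # layer-norm affine (primals_4/5)
lin = torch.addmm(b, torch.relu(x).reshape(64, 4), w.t())               # buf1
mu = lin.mean(dim=-1, keepdim=True)                                     # buf2
rstd = (lin.var(dim=-1, unbiased=False, keepdim=True) + 1e-05).rsqrt()  # buf3
out = ((lin - mu) * rstd * g + beta).reshape(4, 4, 4, 4)                # buf4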
cogentlabs/apl
NormMLP
false
15,058
[ "MIT" ]
50
78092b162e019a2df0ab5ea31d4db0b9860090d3
https://github.com/cogentlabs/apl/tree/78092b162e019a2df0ab5ea31d4db0b9860090d3
SoftTargetCrossEntropyLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py # Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # cross_entropy => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + 
(x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/lh/clhitxf7kqgata3jhxxokguzlgnt3gklcnczie4tvi2oyel75ijc.py # Topologically Sorted Source Nodes: [cross_entropy, sum_1, add, target_2], Original ATen: [aten._log_softmax, aten.sum, aten.add, aten.div, aten.mul, aten.neg] # Source node to ATen node mapping: # add => add # cross_entropy => div_1, exp, log, mul, neg, sub_1, sum_2, sum_3 # sum_1 => sum_1 # target_2 => div # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_2,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [1], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1.1920928955078125e-07), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %add), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %div), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_3,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 64), kwargs = {}) triton_per_fused__log_softmax_add_div_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_add_div_mul_neg_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_add_div_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': 
True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_add_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r3), None) tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (r3), None) tmp15 = tl.load(in_ptr1 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp17 = tmp15 + tmp16 tmp19 = tmp17 + tmp18 tmp21 = tmp19 + tmp20 tmp22 = 1.1920928955078125e-07 tmp23 = tmp21 + tmp22 tmp24 = tmp14 / tmp23 tmp25 = tmp13 * tmp24 tmp26 = tl.broadcast_to(tmp25, [RBLOCK]) tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0)) tmp29 = -tmp28 tmp30 = 0.015625 tmp31 = tmp29 * tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp31, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [cross_entropy, sum_1, add, target_2], Original ATen: [aten._log_softmax, aten.sum, aten.add, aten.div, aten.mul, aten.neg] triton_per_fused__log_softmax_add_div_mul_neg_sum_1.run(buf3, buf0, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del buf0 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main 
compiled_module_main('None', benchmark_compiled_module)
import torch def _convert_to_one_hot(targets: 'torch.Tensor', classes: 'int' ) ->torch.Tensor: """ This function converts target class indices to one-hot vectors, given the number of classes. """ if torch.max(targets).item() >= classes: raise ValueError('Class Index must be less than number of classes') one_hot_targets = torch.zeros((targets.shape[0], classes), dtype=torch. long, device=targets.device) one_hot_targets.scatter_(1, targets.long(), 1) return one_hot_targets class SoftTargetCrossEntropyLoss(torch.nn.CrossEntropyLoss): """This loss allows the targets for the cross entropy loss to be multi-label. Args: reduction (str): specifies reduction to apply to the output. normalize_targets (bool): whether the targets should be normalized to a sum of 1 based on the total count of positive targets for a given sample. """ def __init__(self, reduction: 'str'='mean', normalize_targets: 'bool'=True ) ->None: super().__init__(reduction=reduction) self.normalize_targets = normalize_targets self._eps: 'float' = torch.finfo(torch.float32).eps def forward(self, input: 'torch.Tensor', target: 'torch.Tensor' ) ->torch.Tensor: target = target.detach().clone() if target.ndim == 1: if input.shape[0] != target.shape[0]: raise ValueError( 'SoftTargetCrossEntropyLoss requires input and target to have same batch size!' ) target = _convert_to_one_hot(target.view(-1, 1), input.shape[1]) target = target.float() if self.normalize_targets: target /= self._eps + target.sum(dim=1, keepdim=True) return super().forward(input, target) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
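A brief usage sketch for the loss above (illustrative only; the soft-target branch relies on torch.nn.CrossEntropyLoss accepting class-probability targets, available in recent PyTorch releases):

import torch

loss_fn = SoftTargetCrossEntropyLoss()
logits = torch.randn(8, 5)
hard = torch.randint(0, 5, (8,))  # index targets: _convert_to_one_hot path
soft = torch.rand(8, 5)           # multi-label targets, renormalized per row
print(loss_fn(logits, hard).item(), loss_fn(logits, soft).item())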
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_add_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + r3, None) tmp15 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp17 = tmp15 + tmp16 tmp19 = tmp17 + tmp18 tmp21 = tmp19 + tmp20 tmp22 = 1.1920928955078125e-07 tmp23 = tmp21 + tmp22 tmp24 = tmp14 / tmp23 tmp25 = tmp13 * tmp24 tmp26 = tl.broadcast_to(tmp25, [RBLOCK]) tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0)) tmp29 = -tmp28 tmp30 = 0.015625 tmp31 = tmp29 * tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp31, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused__log_softmax_add_div_mul_neg_sum_1[grid(1)](buf3, buf0, arg0_1, 1, 256, num_warps=2, 
num_stages=1) del arg0_1 del buf0 return buf3, def _convert_to_one_hot(targets: 'torch.Tensor', classes: 'int' ) ->torch.Tensor: """ This function converts target class indices to one-hot vectors, given the number of classes. """ if torch.max(targets).item() >= classes: raise ValueError('Class Index must be less than number of classes') one_hot_targets = torch.zeros((targets.shape[0], classes), dtype=torch. long, device=targets.device) one_hot_targets.scatter_(1, targets.long(), 1) return one_hot_targets class SoftTargetCrossEntropyLossNew(torch.nn.CrossEntropyLoss): """This loss allows the targets for the cross entropy loss to be multi-label. Args: reduction (str): specifies reduction to apply to the output. normalize_targets (bool): whether the targets should be normalized to a sum of 1 based on the total count of positive targets for a given sample. """ def __init__(self, reduction: 'str'='mean', normalize_targets: 'bool'=True ) ->None: super().__init__(reduction=reduction) self.normalize_targets = normalize_targets self._eps: 'float' = torch.finfo(torch.float32).eps def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
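For readers decoding the fused reduction above, a hedged reference computation: 1.1920928955078125e-07 is torch.float32 eps, 0.015625 is 1/64 (the 'mean' reduction over the 4 batch x 16 spatial positions of the (4, 4, 4, 4) inputs), and log-softmax runs over dim 1 (the stride-16 axis in the kernel's indexing).

# Eager-mode reference for the value the fused kernels produce.
import torch
import torch.nn.functional as F

logits = torch.rand(4, 4, 4, 4)
target = torch.rand(4, 4, 4, 4)
t = target / (torch.finfo(torch.float32).eps + target.sum(1, keepdim=True))
ref = -(F.log_softmax(logits, dim=1) * t).sum() / 64   # matches tmp31 above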
colin2328/recipes
SoftTargetCrossEntropyLoss
false
15,059
[ "BSD-3-Clause" ]
161
a6cd0e12c9fcb48749721a6548d0a02319d54bd1
https://github.com/colin2328/recipes/tree/a6cd0e12c9fcb48749721a6548d0a02319d54bd1
LeakyReLU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/bh/cbhjelgjjonnfx2cmpcowhfowpub62dfr4klgbacfyotvwmh3vzu.py # Topologically Sorted Source Nodes: [features_stddev, div, sub_3, mul, wrapped_sqrt_1, truediv_2, erf, add, cdf, mu_cdf, sub, pow_1, neg, sub_1, wrapped_log_1, sub_2, pdf, stddev_pdf, mean_r, neg_1, negative_cdf, mul_6, mean_n, mul_9, outputs_mean, pow_2, squared_mean_variance, mul_5, mean_stddev_pdf, add_3, pow_3, variance_r, mul_7, sub_6, pow_4, variance_n, mul_10, add_5, neg_2, covxy, mul_11, outputs_variance], Original ATen: [aten.sqrt, aten.div, aten.sub, aten.mul, aten.erf, aten.add, aten.pow, aten.neg, aten.log, aten.exp, aten.rsub] # Source node to ATen node mapping: # add => add # add_3 => add_3 # add_5 => add_5 # cdf => mul_1 # covxy => mul_8 # div => div # erf => erf # features_stddev => sqrt # mean_n => add_4 # mean_r => add_2 # mean_stddev_pdf => mul_4 # mu_cdf => mul_2 # mul => mul # mul_10 => mul_10 # mul_11 => mul_11 # mul_5 => mul_5 # mul_6 => mul_6 # mul_7 => mul_7 # mul_9 => mul_9 # neg => neg # neg_1 => neg_1 # neg_2 => neg_2 # negative_cdf => sub_4 # outputs_mean => sub_8 # outputs_variance => sub_9 # pdf => exp # pow_1 => pow_1 # pow_2 => pow_2 # pow_3 => pow_3 # pow_4 => pow_4 # squared_mean_variance => add_1 # stddev_pdf => mul_3 # sub => sub # sub_1 => div_1 # sub_2 => sub_2 # sub_3 => sub_3 # sub_6 => sub_6 # truediv_2 => div_2 # variance_n => sub_7 # variance_r => sub_5 # wrapped_log_1 => full_default_1 # wrapped_sqrt_1 => full_default_2 # Graph fragment: # %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%arg0_1,), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg1_1, %sqrt), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, 0.0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, 1.0), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.414213562373095), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %full_default_2), kwargs = {}) # 
%erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div_2,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {}) # %mul_1 : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %mul_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, 0.0), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%pow_1,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, 2.0), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.9189385332046727), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_1, %full_default_1), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) # %mul_3 : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sqrt, %exp), kwargs = {}) # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {}) # %sub_4 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mul_1), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_1, %sub_4), kwargs = {}) # %add_4 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %mul_3), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_4, 0.01), kwargs = {}) # %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %mul_9), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, %arg0_1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %mul_1), kwargs = {}) # %mul_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %mul_3), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %mul_4), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_2, 2), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %pow_3), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %sub_4), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_7, %mul_4), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_4, 2), kwargs = {}) # %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_6, %pow_4), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, 0.0001), kwargs = {}) 
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_5, %mul_10), kwargs = {}) # %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%add_2,), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_2, %add_4), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_8, 0.02), kwargs = {}) # %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %mul_11), kwargs = {}) triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_rsub_sqrt_sub_0 = async_compile.triton('triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_rsub_sqrt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_rsub_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_rsub_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = libdevice.sqrt(tmp1) tmp3 = tmp0 / tmp2 tmp4 = 0.0 tmp5 = tmp3 - tmp4 tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp8 = 1.414213562373095 tmp9 = tmp7 / tmp8 tmp10 = libdevice.erf(tmp9) tmp11 = tmp10 + tmp6 tmp12 = 0.5 tmp13 = tmp11 * tmp12 tmp14 = tmp0 * tmp13 tmp15 = tmp5 * tmp5 tmp16 = -tmp15 tmp17 = tmp16 * tmp12 tmp18 = 0.9189385332046727 tmp19 = tmp17 - tmp18 tmp20 = tl_math.exp(tmp19) tmp21 = tmp2 * tmp20 tmp22 = tmp14 + tmp21 tmp23 = -tmp0 tmp24 = tmp6 - tmp13 tmp25 = tmp23 * tmp24 tmp26 = tmp25 + tmp21 tmp27 = 0.01 tmp28 = tmp26 * tmp27 tmp29 = tmp22 - tmp28 tmp30 = tmp0 * tmp0 tmp31 = tmp30 + tmp1 tmp32 = tmp31 * tmp13 tmp33 = tmp0 * tmp21 tmp34 = tmp32 + tmp33 tmp35 = tmp22 * tmp22 tmp36 = tmp34 - tmp35 tmp37 = tmp31 * tmp24 tmp38 = tmp37 - tmp33 tmp39 = tmp26 * tmp26 tmp40 = tmp38 - tmp39 tmp41 = -tmp22 tmp42 = tmp41 * tmp26 tmp43 = 0.0001 tmp44 = tmp40 * tmp43 tmp45 = tmp36 + tmp44 tmp46 = 0.02 tmp47 = tmp42 * tmp46 tmp48 = tmp45 - tmp47 tl.store(out_ptr0 + (x0), tmp29, xmask) 
tl.store(in_out_ptr0 + (x0), tmp48, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [features_stddev, div, sub_3, mul, wrapped_sqrt_1, truediv_2, erf, add, cdf, mu_cdf, sub, pow_1, neg, sub_1, wrapped_log_1, sub_2, pdf, stddev_pdf, mean_r, neg_1, negative_cdf, mul_6, mean_n, mul_9, outputs_mean, pow_2, squared_mean_variance, mul_5, mean_stddev_pdf, add_3, pow_3, variance_r, mul_7, sub_6, pow_4, variance_n, mul_10, add_5, neg_2, covxy, mul_11, outputs_variance], Original ATen: [aten.sqrt, aten.div, aten.sub, aten.mul, aten.erf, aten.add, aten.pow, aten.neg, aten.log, aten.exp, aten.rsub] stream0 = get_raw_stream(0) triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_rsub_sqrt_sub_0.run(buf4, arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
import torch.nn as nn
from numbers import Number


def keep_variance_fn(x):
    return x + 0.001


def normcdf(value, mu=0.0, stddev=1.0):
    sinv = 1.0 / stddev if isinstance(stddev, Number) else stddev.reciprocal()
    return 0.5 * (1.0 + torch.erf((value - mu) * sinv / np.sqrt(2.0)))


def _normal_log_pdf(value, mu, stddev):
    var = stddev ** 2
    log_scale = np.log(stddev) if isinstance(stddev, Number) else torch.log(stddev)
    return -(value - mu) ** 2 / (2.0 * var) - log_scale - np.log(np.sqrt(2.0 * np.pi))


def normpdf(value, mu=0.0, stddev=1.0):
    return torch.exp(_normal_log_pdf(value, mu, stddev))


class LeakyReLU(nn.Module):
    """Leaky ReLU for uncertainty propagation: maps an input Gaussian
    (mean, variance) pair to the closed-form mean and variance of
    leaky_relu(X), using the split leaky_relu(X) = R - slope * N with
    R = max(0, X) and N = max(0, -X)."""

    def __init__(self, negative_slope=0.01, keep_variance_fn=None):
        super(LeakyReLU, self).__init__()
        self._keep_variance_fn = keep_variance_fn
        self._negative_slope = negative_slope

    def forward(self, features_mean, features_variance):
        features_stddev = torch.sqrt(features_variance)
        div = features_mean / features_stddev
        pdf = normpdf(div)
        cdf = normcdf(div)
        negative_cdf = 1.0 - cdf
        mu_cdf = features_mean * cdf
        stddev_pdf = features_stddev * pdf
        squared_mean_variance = features_mean ** 2 + features_variance
        mean_stddev_pdf = features_mean * stddev_pdf
        # Moments of R = max(0, X).
        mean_r = mu_cdf + stddev_pdf
        variance_r = squared_mean_variance * cdf + mean_stddev_pdf - mean_r ** 2
        # Moments of N = max(0, -X).
        mean_n = -features_mean * negative_cdf + stddev_pdf
        variance_n = (squared_mean_variance * negative_cdf - mean_stddev_pdf -
            mean_n ** 2)
        # R * N == 0 pointwise, so Cov(R, N) = -E[R] * E[N].
        covxy = -mean_r * mean_n
        outputs_mean = mean_r - self._negative_slope * mean_n
        outputs_variance = (variance_r +
            self._negative_slope * self._negative_slope * variance_n -
            2.0 * self._negative_slope * covxy)
        if self._keep_variance_fn is not None:
            outputs_variance = self._keep_variance_fn(outputs_variance)
        return outputs_mean, outputs_variance


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
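A quick way to sanity-check the closed-form moments above is Monte Carlo sampling. The sketch below is not part of the original repo; it assumes the LeakyReLU class above is in scope, and the sample count and tolerances are illustrative.

import torch
import torch.nn.functional as F

torch.manual_seed(0)
mean = torch.randn(8)
variance = torch.rand(8) + 0.1  # keep the stddev well away from zero

layer = LeakyReLU(negative_slope=0.01)
out_mean, out_var = layer(mean, variance)

# Sample X ~ N(mean, variance) elementwise and push the samples through an
# ordinary leaky ReLU; the empirical moments should match the closed form.
samples = mean + variance.sqrt() * torch.randn(100000, 8)
y = F.leaky_relu(samples, negative_slope=0.01)

print(torch.allclose(out_mean, y.mean(dim=0), atol=0.02))  # expected: True
print(torch.allclose(out_var, y.var(dim=0), atol=0.02))    # expected: True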
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn from numbers import Number assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_rsub_sqrt_sub_0( in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = libdevice.sqrt(tmp1) tmp3 = tmp0 / tmp2 tmp4 = 0.0 tmp5 = tmp3 - tmp4 tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp8 = 1.414213562373095 tmp9 = tmp7 / tmp8 tmp10 = libdevice.erf(tmp9) tmp11 = tmp10 + tmp6 tmp12 = 0.5 tmp13 = tmp11 * tmp12 tmp14 = tmp0 * tmp13 tmp15 = tmp5 * tmp5 tmp16 = -tmp15 tmp17 = tmp16 * tmp12 tmp18 = 0.9189385332046727 tmp19 = tmp17 - tmp18 tmp20 = tl_math.exp(tmp19) tmp21 = tmp2 * tmp20 tmp22 = tmp14 + tmp21 tmp23 = -tmp0 tmp24 = tmp6 - tmp13 tmp25 = tmp23 * tmp24 tmp26 = tmp25 + tmp21 tmp27 = 0.01 tmp28 = tmp26 * tmp27 tmp29 = tmp22 - tmp28 tmp30 = tmp0 * tmp0 tmp31 = tmp30 + tmp1 tmp32 = tmp31 * tmp13 tmp33 = tmp0 * tmp21 tmp34 = tmp32 + tmp33 tmp35 = tmp22 * tmp22 tmp36 = tmp34 - tmp35 tmp37 = tmp31 * tmp24 tmp38 = tmp37 - tmp33 tmp39 = tmp26 * tmp26 tmp40 = tmp38 - tmp39 tmp41 = -tmp22 tmp42 = tmp41 * tmp26 tmp43 = 0.0001 tmp44 = tmp40 * tmp43 tmp45 = tmp36 + tmp44 tmp46 = 0.02 tmp47 = tmp42 * tmp46 tmp48 = tmp45 - tmp47 tl.store(out_ptr0 + x0, tmp29, xmask) tl.store(in_out_ptr0 + x0, tmp48, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_rsub_sqrt_sub_0[grid (256)](buf4, arg1_1, arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, buf4 def keep_variance_fn(x): return x + 0.001 def normcdf(value, mu=0.0, stddev=1.0): sinv = 1.0 / stddev if isinstance(stddev, Number) else stddev.reciprocal() return 0.5 * (1.0 + torch.erf((value - mu) * sinv / np.sqrt(2.0))) def _normal_log_pdf(value, mu, stddev): var = stddev ** 2 log_scale = np.log(stddev) if isinstance(stddev, Number) else torch.log( stddev) return -(value - mu) ** 2 / (2.0 * var) - log_scale - np.log(np.sqrt( 2.0 * np.pi)) def normpdf(value, mu=0.0, stddev=1.0): return torch.exp(_normal_log_pdf(value, mu, stddev)) class LeakyReLUNew(nn.Module): def __init__(self, negative_slope=0.01, keep_variance_fn=None): super(LeakyReLUNew, self).__init__() self._keep_variance_fn = keep_variance_fn self._negative_slope = negative_slope def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
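One caveat before trusting a side-by-side run: in the wrapper's `call`, the kernel receives arg1_1 as in_ptr0 (the tensor used as the mean) and arg0_1 as in_ptr1 (the tensor that is sqrt-ed, i.e. the variance), while `forward` binds input_0 to arg0_1. Read that way, the compiled module appears to expect its arguments as (variance, mean); the symmetric random inputs from get_inputs() would mask any ordering mismatch. A minimal parity sketch under that assumption (CUDA required; note the kernel also hard-codes the default slope 0.01 via the constants 0.01, 0.0001 = slope squared, and 0.02 = 2 * slope):

import torch

torch.manual_seed(0)
mean = torch.rand(4, 4, 4, 4, device='cuda')
variance = torch.rand(4, 4, 4, 4, device='cuda')

ref_mean, ref_var = LeakyReLU()(mean, variance)      # eager reference above
new_mean, new_var = LeakyReLUNew()(variance, mean)   # argument order per the note above

print(torch.allclose(ref_mean, new_mean, atol=1e-5))  # expected: True
print(torch.allclose(ref_var, new_var, atol=1e-5))    # expected: True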
collector-m/LiDAR-MOS
LeakyReLU
false
15,060
[ "MIT" ]
268
7ccbb63b4ee7c40195b35dd0dddd71473fae25b1
https://github.com/collector-m/LiDAR-MOS/tree/7ccbb63b4ee7c40195b35dd0dddd71473fae25b1
ReLU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/7d/c7dlf7xsdr4eyx4wciw5byptm4lpdngi52fdrrqzbrpcywi4eu2a.py # Topologically Sorted Source Nodes: [pow_2, add_2, features_stddev, div, sub_3, mul, wrapped_sqrt_1, truediv_2, erf, add, cdf, mul_4, mul_5, sub, pow_1, neg, sub_1, wrapped_log_1, sub_2, pdf, mul_6, add_3, mul_2, mul_3, outputs_mean, pow_3, outputs_variance], Original ATen: [aten.pow, aten.add, aten.sqrt, aten.div, aten.sub, aten.mul, aten.erf, aten.neg, aten.log, aten.exp] # Source node to ATen node mapping: # add => add # add_2 => add_2 # add_3 => add_3 # cdf => mul_1 # div => div # erf => erf # features_stddev => sqrt # mul => mul # mul_2 => mul_2 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # mul_6 => mul_6 # neg => neg # outputs_mean => add_1 # outputs_variance => sub_4 # pdf => exp # pow_1 => pow_1 # pow_2 => pow_2 # pow_3 => pow_3 # sub => sub # sub_1 => div_1 # sub_2 => sub_2 # sub_3 => sub_3 # truediv_2 => div_2 # wrapped_log_1 => full_default_1 # wrapped_sqrt_1 => full_default_2 # Graph fragment: # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, %arg0_1), kwargs = {}) # %sqrt : [num_users=3] = call_function[target=torch.ops.aten.sqrt.default](args = (%arg0_1,), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg1_1, %sqrt), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, 0.0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, 1.0), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.414213562373095), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %full_default_2), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div_2,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {}) # %mul_1 : [num_users=2] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, %mul_1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %sqrt), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, 0.0), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%pow_1,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, 2.0), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.9189385332046727), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_1, %full_default_1), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %exp), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_6), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %mul_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sqrt, %exp), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_1, 2), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %pow_3), kwargs = {}) triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_sqrt_sub_0 = async_compile.triton('triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_sqrt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_sqrt_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_sqrt_sub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = libdevice.sqrt(tmp1) tmp3 = tmp0 / tmp2 tmp4 = 0.0 tmp5 = tmp3 - tmp4 tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp8 = 1.414213562373095 tmp9 = tmp7 / tmp8 tmp10 = libdevice.erf(tmp9) tmp11 = tmp10 + tmp6 tmp12 = 0.5 tmp13 = tmp11 * tmp12 tmp14 = tmp0 * tmp13 tmp15 = tmp5 * tmp5 tmp16 = -tmp15 tmp17 = tmp16 * tmp12 tmp18 = 0.9189385332046727 tmp19 = tmp17 - tmp18 tmp20 = tl_math.exp(tmp19) tmp21 = tmp2 * tmp20 tmp22 = tmp14 + tmp21 tmp23 = tmp0 * tmp0 tmp24 = tmp23 + tmp1 tmp25 = tmp24 * tmp13 tmp26 = tmp0 * tmp2 tmp27 = tmp26 * tmp20 tmp28 = tmp25 + tmp27 tmp29 = tmp22 * tmp22 tmp30 = tmp28 - tmp29 tl.store(out_ptr0 + (x0), tmp22, xmask) tl.store(out_ptr1 + (x0), tmp30, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_2, add_2, features_stddev, div, sub_3, mul, wrapped_sqrt_1, truediv_2, erf, add, cdf, mul_4, mul_5, sub, pow_1, neg, sub_1, wrapped_log_1, sub_2, pdf, mul_6, add_3, mul_2, mul_3, outputs_mean, pow_3, outputs_variance], Original ATen: [aten.pow, aten.add, aten.sqrt, aten.div, aten.sub, aten.mul, aten.erf, aten.neg, aten.log, aten.exp] stream0 = get_raw_stream(0) triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_sqrt_sub_0.run(arg1_1, arg0_1, buf0, buf1, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
import torch.nn as nn
from numbers import Number


def keep_variance_fn(x):
    return x + 0.001


def normcdf(value, mu=0.0, stddev=1.0):
    sinv = 1.0 / stddev if isinstance(stddev, Number) else stddev.reciprocal()
    return 0.5 * (1.0 + torch.erf((value - mu) * sinv / np.sqrt(2.0)))


def _normal_log_pdf(value, mu, stddev):
    var = stddev ** 2
    log_scale = np.log(stddev) if isinstance(stddev, Number) else torch.log(stddev)
    return -(value - mu) ** 2 / (2.0 * var) - log_scale - np.log(np.sqrt(2.0 * np.pi))


def normpdf(value, mu=0.0, stddev=1.0):
    return torch.exp(_normal_log_pdf(value, mu, stddev))


class ReLU(nn.Module):
    """ReLU for uncertainty propagation: maps an input Gaussian
    (mean, variance) to the exact mean and variance of max(0, X)."""

    def __init__(self, keep_variance_fn=None):
        super(ReLU, self).__init__()
        self._keep_variance_fn = keep_variance_fn

    def forward(self, features_mean, features_variance):
        features_stddev = torch.sqrt(features_variance)
        div = features_mean / features_stddev
        pdf = normpdf(div)
        cdf = normcdf(div)
        outputs_mean = features_mean * cdf + features_stddev * pdf
        outputs_variance = (features_mean ** 2 + features_variance) * cdf \
            + features_mean * features_stddev * pdf - outputs_mean ** 2
        if self._keep_variance_fn is not None:
            outputs_variance = self._keep_variance_fn(outputs_variance)
        return outputs_mean, outputs_variance


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
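The forward above implements the exact first and second moments of a rectified Gaussian: with a = mu / sigma, E[max(0, X)] = mu * Phi(a) + sigma * phi(a) and E[max(0, X)^2] = (mu^2 + sigma^2) * Phi(a) + mu * sigma * phi(a), so the variance is the second moment minus the squared mean. A small numeric check, not from the repo; the sample count is illustrative:

import torch

torch.manual_seed(0)
mu, sigma = 0.3, 0.7
x = mu + sigma * torch.randn(1000000)
xp = torch.clamp(x, min=0.0)  # samples of max(0, X)

m, v = ReLU()(torch.tensor([mu]), torch.tensor([sigma ** 2]))
print(float(m), float(xp.mean()))  # should agree to roughly 1e-3
print(float(v), float(xp.var()))   # likewise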
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn from numbers import Number assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_sqrt_sub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = libdevice.sqrt(tmp1) tmp3 = tmp0 / tmp2 tmp4 = 0.0 tmp5 = tmp3 - tmp4 tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp8 = 1.414213562373095 tmp9 = tmp7 / tmp8 tmp10 = libdevice.erf(tmp9) tmp11 = tmp10 + tmp6 tmp12 = 0.5 tmp13 = tmp11 * tmp12 tmp14 = tmp0 * tmp13 tmp15 = tmp5 * tmp5 tmp16 = -tmp15 tmp17 = tmp16 * tmp12 tmp18 = 0.9189385332046727 tmp19 = tmp17 - tmp18 tmp20 = tl_math.exp(tmp19) tmp21 = tmp2 * tmp20 tmp22 = tmp14 + tmp21 tmp23 = tmp0 * tmp0 tmp24 = tmp23 + tmp1 tmp25 = tmp24 * tmp13 tmp26 = tmp0 * tmp2 tmp27 = tmp26 * tmp20 tmp28 = tmp25 + tmp27 tmp29 = tmp22 * tmp22 tmp30 = tmp28 - tmp29 tl.store(out_ptr0 + x0, tmp22, xmask) tl.store(out_ptr1 + x0, tmp30, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_sqrt_sub_0[grid(256)]( arg1_1, arg0_1, buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, buf1 def keep_variance_fn(x): return x + 0.001 def normcdf(value, mu=0.0, stddev=1.0): sinv = 1.0 / stddev if isinstance(stddev, Number) else stddev.reciprocal() return 0.5 * (1.0 + torch.erf((value - mu) * sinv / np.sqrt(2.0))) def _normal_log_pdf(value, mu, stddev): var = stddev ** 2 log_scale = np.log(stddev) if isinstance(stddev, Number) else torch.log( stddev) return -(value - mu) ** 2 / (2.0 * var) - log_scale - np.log(np.sqrt( 2.0 * np.pi)) def normpdf(value, mu=0.0, stddev=1.0): return torch.exp(_normal_log_pdf(value, mu, stddev)) class ReLUNew(nn.Module): def __init__(self, keep_variance_fn=None): super(ReLUNew, self).__init__() self._keep_variance_fn = keep_variance_fn def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
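A hypothetical cross-record check, assuming the LeakyReLU class from the previous record is also in scope: with negative_slope=0 the leaky variant reduces exactly to this ReLU, since the mean_n, variance_n, and covxy terms are all multiplied by the slope.

import torch

torch.manual_seed(0)
mean = torch.rand(4, 4, 4, 4)
variance = torch.rand(4, 4, 4, 4) + 0.05

m1, v1 = ReLU()(mean, variance)
m2, v2 = LeakyReLU(negative_slope=0.0)(mean, variance)

print(torch.allclose(m1, m2, atol=1e-6))  # expected: True
print(torch.allclose(v1, v2, atol=1e-6))  # expected: True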
collector-m/LiDAR-MOS
ReLU
false
15,061
[ "MIT" ]
268
7ccbb63b4ee7c40195b35dd0dddd71473fae25b1
https://github.com/collector-m/LiDAR-MOS/tree/7ccbb63b4ee7c40195b35dd0dddd71473fae25b1
SoftmaxFocalClassificationLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/qz/cqza6p5fjiie2hfiu5dfjqqugrnzziwuwxzlhzy2aa7khopxjbym.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 
= xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7s/c7snjobcmnjrjom7ympqxzy5raimpkvbnlg3ednlriz5pqkbv7ua.py # Topologically Sorted Source Nodes: [softmax, input_soft], Original ATen: [aten._softmax, aten.add] # Source node to ATen node mapping: # input_soft => add # softmax => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 1e-06), kwargs = {}) triton_poi_fused__softmax_add_1 = async_compile.triton('triton_poi_fused__softmax_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = 1e-06 tmp10 = tmp8 + tmp9 tl.store(out_ptr0 + (x3), tmp10, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/o4/co4o4n4jkfcyh664alya42ah6yiys3k54qzn4o3iqnz4bfimpwvu.py # Topologically Sorted Source Nodes: [neg, add_1, weight, mul, log, focal, mul_2, loss_tmp], Original ATen: [aten.neg, aten.add, aten.pow, aten.mul, aten.log, aten.sum] # Source node to ATen node mapping: # add_1 => add_1 # focal => mul_1 # log => log # loss_tmp => sum_2 # mul => mul # mul_2 => mul_2 # neg => neg # weight => pow_1 # Graph fragment: # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%add,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg, 1.0), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_1, 2.0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, -1.0), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %log), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %mul_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [1], True), kwargs = {}) triton_poi_fused_add_log_mul_neg_pow_sum_2 = async_compile.triton('triton_poi_fused_add_log_mul_neg_pow_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_log_mul_neg_pow_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_log_mul_neg_pow_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask) tmp11 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp12 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask) tmp21 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp22 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask) tmp31 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp32 = 
tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask) tmp2 = -tmp1 tmp3 = 1.0 tmp4 = tmp2 + tmp3 tmp5 = tmp4 * tmp4 tmp6 = -1.0 tmp7 = tmp5 * tmp6 tmp8 = tl_math.log(tmp1) tmp9 = tmp7 * tmp8 tmp10 = tmp0 * tmp9 tmp13 = -tmp12 tmp14 = tmp13 + tmp3 tmp15 = tmp14 * tmp14 tmp16 = tmp15 * tmp6 tmp17 = tl_math.log(tmp12) tmp18 = tmp16 * tmp17 tmp19 = tmp11 * tmp18 tmp20 = tmp10 + tmp19 tmp23 = -tmp22 tmp24 = tmp23 + tmp3 tmp25 = tmp24 * tmp24 tmp26 = tmp25 * tmp6 tmp27 = tl_math.log(tmp22) tmp28 = tmp26 * tmp27 tmp29 = tmp21 * tmp28 tmp30 = tmp20 + tmp29 tmp33 = -tmp32 tmp34 = tmp33 + tmp3 tmp35 = tmp34 * tmp34 tmp36 = tmp35 * tmp6 tmp37 = tl_math.log(tmp32) tmp38 = tmp36 * tmp37 tmp39 = tmp31 * tmp38 tmp40 = tmp30 + tmp39 tl.store(out_ptr0 + (x2), tmp40, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/tm/ctm3ca5xni5jgawczm3atehhqhu23fzhzp3mv77qf5w562wrz6vf.py # Topologically Sorted Source Nodes: [mul_3], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_3 => mul_3 # Graph fragment: # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, %arg2_1), kwargs = {}) triton_poi_fused_mul_3 = async_compile.triton('triton_poi_fused_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: 
[aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax, input_soft], Original ATen: [aten._softmax, aten.add] triton_poi_fused__softmax_add_1.run(buf0, buf1, 256, grid=grid(256), stream=stream0) del buf0 buf2 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [neg, add_1, weight, mul, log, focal, mul_2, loss_tmp], Original ATen: [aten.neg, aten.add, aten.pow, aten.mul, aten.log, aten.sum] triton_poi_fused_add_log_mul_neg_pow_sum_2.run(arg1_1, buf1, buf2, 64, grid=grid(64), stream=stream0) del arg1_1 buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [mul_3], Original ATen: [aten.mul] triton_poi_fused_mul_3.run(buf2, arg2_1, buf3, 256, grid=grid(256), stream=stream0) del arg2_1 del buf2 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class SoftmaxFocalClassificationLoss(nn.Module):
    """Criterion that computes Focal loss.

    According to [1], the Focal loss is computed as follows:

    .. math::

        \\text{FL}(p_t) = -\\alpha_t (1 - p_t)^{\\gamma} \\, \\text{log}(p_t)

    where:
       - :math:`p_t` is the model's estimated probability for each class.

    Arguments:
        alpha (float): Weighting factor :math:`\\alpha \\in [0, 1]`.
        gamma (float): Focusing parameter :math:`\\gamma >= 0`.
        reduction (str, optional): Specifies the reduction to apply to the
         output: ‘none’ | ‘mean’ | ‘sum’. ‘none’: no reduction will be applied,
         ‘mean’: the sum of the output will be divided by the number of
         elements in the output, ‘sum’: the output will be summed.
         Default: ‘none’.

    Shape:
        - Input: :math:`(N, C, *)` where C = number of classes.
        - Target: :math:`(N, *)` where each value is
          :math:`0 ≤ targets[i] ≤ C−1`.

    Examples:
        >>> N = 5  # num_classes
        >>> kwargs = {"alpha": 0.5, "gamma": 2.0, "reduction": 'mean'}
        >>> loss = kornia.losses.FocalLoss(**kwargs)
        >>> input = torch.randn(1, N, 3, 5, requires_grad=True)
        >>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
        >>> output = loss(input, target)
        >>> output.backward()

    References:
        [1] https://arxiv.org/abs/1708.02002
    """

    def __init__(self, alpha: float = 1.0, gamma: float = 2.0,
                 reduction: str = 'none') -> None:
        super(SoftmaxFocalClassificationLoss, self).__init__()
        self.alpha: float = alpha
        self.gamma: float = gamma
        self.reduction: str = reduction
        self.eps: float = 1e-06

    def focal_loss(self, input: torch.Tensor, target: torch.Tensor,
                   weights: torch.Tensor, alpha: float = 1.0,
                   gamma: float = 2.0, reduction: str = 'none',
                   eps: float = 1e-08) -> torch.Tensor:
        """Function that computes Focal loss.

        See :class:`~kornia.losses.FocalLoss` for details.

        Note: forward() passes self.eps (1e-06), so the 1e-08 default here is
        unused in this module, and `reduction` is accepted but never applied:
        the output is always per-location ('none' behaviour).
        """
        if not torch.is_tensor(input):
            raise TypeError('Input type is not a torch.Tensor. Got {}'.
                format(type(input)))
        if not len(input.shape) >= 2:
            raise ValueError('Invalid input shape, we expect BxCx*. Got: {}'
                .format(input.shape))
        if input.size(0) != target.size(0):
            raise ValueError(
                'Expected input batch_size ({}) to match target batch_size ({}).'
                .format(input.size(0), target.size(0)))
        if not input.device == target.device:
            raise ValueError(
                'input and target must be in the same device. Got: {} and {}'
                .format(input.device, target.device))
        input_soft: torch.Tensor = F.softmax(input, dim=1) + eps
        weight = torch.pow(-input_soft + 1.0, gamma)
        focal = -alpha * weight * torch.log(input_soft)
        loss_tmp = torch.sum(target * focal, dim=1, keepdims=True)
        if weights is None:
            return loss_tmp
        return loss_tmp * weights

    def forward(self, input: torch.Tensor, target: torch.Tensor,
                weights: torch.Tensor) -> torch.Tensor:
        return self.focal_loss(input, target, weights, self.alpha,
            self.gamma, self.reduction, self.eps)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
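Unlike the kornia docstring example (copied from upstream, which takes class indices), this variant expects `target` as a dense tensor with the same (N, C, ...) layout as `input`, plus a `weights` tensor that multiplies the per-location loss. A hypothetical usage sketch; the names and shapes below are illustrative, not from the repo:

import torch
import torch.nn.functional as F

loss_fn = SoftmaxFocalClassificationLoss(alpha=1.0, gamma=2.0)

logits = torch.randn(2, 5, 3, 3, requires_grad=True)
labels = torch.randint(0, 5, (2, 3, 3))
# Convert class indices to a dense one-hot target with channel dim 1.
one_hot = F.one_hot(labels, num_classes=5).permute(0, 3, 1, 2).float()
weights = torch.ones(2, 1, 3, 3)

loss = loss_fn(logits, one_hot, weights)  # shape (2, 1, 3, 3); no reduction
loss.mean().backward()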
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = 1e-06 tmp10 = tmp8 + tmp9 tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_add_log_mul_neg_pow_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp11 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp12 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp21 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp22 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp31 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp32 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp2 = -tmp1 tmp3 = 1.0 tmp4 = tmp2 + tmp3 tmp5 = tmp4 * tmp4 tmp6 = -1.0 tmp7 = tmp5 * tmp6 tmp8 = tl_math.log(tmp1) tmp9 = tmp7 * tmp8 tmp10 = tmp0 * tmp9 tmp13 = -tmp12 tmp14 = tmp13 + tmp3 tmp15 = tmp14 * tmp14 tmp16 = tmp15 * tmp6 tmp17 = tl_math.log(tmp12) tmp18 = tmp16 * tmp17 tmp19 = tmp11 * tmp18 tmp20 = tmp10 + tmp19 tmp23 = -tmp22 tmp24 = tmp23 + tmp3 tmp25 = tmp24 * tmp24 tmp26 = tmp25 * tmp6 tmp27 = tl_math.log(tmp22) tmp28 = tmp26 * tmp27 tmp29 = tmp21 * tmp28 tmp30 = tmp20 + tmp29 tmp33 = -tmp32 tmp34 = tmp33 + tmp3 tmp35 = tmp34 * tmp34 tmp36 = tmp35 * tmp6 tmp37 = tl_math.log(tmp32) tmp38 = tmp36 * tmp37 tmp39 = tmp31 * tmp38 tmp40 = tmp30 + tmp39 tl.store(out_ptr0 + x2, tmp40, xmask) @triton.jit def triton_poi_fused_mul_3(in_ptr0, 
in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_add_1[grid(256)](buf0, buf1, 256, XBLOCK= 128, num_warps=4, num_stages=1) del buf0 buf2 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) triton_poi_fused_add_log_mul_neg_pow_sum_2[grid(64)](arg1_1, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg1_1 buf3 = buf1 del buf1 triton_poi_fused_mul_3[grid(256)](buf2, arg2_1, buf3, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg2_1 del buf2 return buf3, class SoftmaxFocalClassificationLossNew(nn.Module): """Criterion that computes Focal loss. According to [1], the Focal loss is computed as follows: .. math:: \\text{FL}(p_t) = -\\alpha_t (1 - p_t)^{\\gamma} \\, \\text{log}(p_t) where: - :math:`p_t` is the model's estimated probability for each class. Arguments: alpha (float): Weighting factor :math:`\\alpha \\in [0, 1]`. gamma (float): Focusing parameter :math:`\\gamma >= 0`. reduction (str, optional): Specifies the reduction to apply to the output: ‘none’ | ‘mean’ | ‘sum’. ‘none’: no reduction will be applied, ‘mean’: the sum of the output will be divided by the number of elements in the output, ‘sum’: the output will be summed. Default: ‘none’. Shape: - Input: :math:`(N, C, *)` where C = number of classes. - Target: :math:`(N, *)` where each value is :math:`0 ≤ targets[i] ≤ C−1`. Examples: >>> N = 5 # num_classes >>> kwargs = {"alpha": 0.5, "gamma": 2.0, "reduction": 'mean'} >>> loss = kornia.losses.FocalLoss(**kwargs) >>> input = torch.randn(1, N, 3, 5, requires_grad=True) >>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N) >>> output = loss(input, target) >>> output.backward() References: [1] https://arxiv.org/abs/1708.02002 """ def __init__(self, alpha: 'float'=1.0, gamma: 'float'=2.0, reduction: 'str'='none') ->None: super(SoftmaxFocalClassificationLossNew, self).__init__() self.alpha: 'float' = alpha self.gamma: 'float' = gamma self.reduction: 'str' = reduction self.eps: 'float' = 1e-06 def focal_loss(self, input: 'torch.Tensor', target: 'torch.Tensor', weights: 'torch.Tensor', alpha: 'float'=1.0, gamma: 'float'=2.0, reduction: 'str'='none', eps: 'float'=1e-08) ->torch.Tensor: """Function that computes Focal loss. See :class:`~kornia.losses.FocalLoss` for details. """ if not torch.is_tensor(input): raise TypeError('Input type is not a torch.Tensor. Got {}'. format(type(input))) if not len(input.shape) >= 2: raise ValueError('Invalid input shape, we expect BxCx*. Got: {}' .format(input.shape)) if input.size(0) != target.size(0): raise ValueError( 'Expected input batch_size ({}) to match target batch_size ({}).' 
.format(input.size(0), target.size(0))) if not input.device == target.device: raise ValueError( 'input and target must be in the same device. Got: {} and {}' .format(input.device, target.device)) input_soft: 'torch.Tensor' = F.softmax(input, dim=1) + eps weight = torch.pow(-input_soft + 1.0, gamma) focal = -alpha * weight * torch.log(input_soft) loss_tmp = torch.sum(target * focal, dim=1, keepdims=True) if weights is None: return loss_tmp return loss_tmp * weights def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
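The compiled path fuses the softmax, the focal term, the class-dimension sum, and the weight multiply into four pointwise kernels, and here the placeholder order matches the eager signature (arg0_1 = logits, arg1_1 = target, arg2_1 = weights). A minimal parity sketch against the eager class, assuming a CUDA device (the guards in `call` pin all three inputs to shape (4, 4, 4, 4)):

import torch

torch.manual_seed(0)
logits = torch.rand(4, 4, 4, 4, device='cuda')
target = torch.rand(4, 4, 4, 4, device='cuda')
weights = torch.rand(4, 4, 4, 4, device='cuda')

ref = SoftmaxFocalClassificationLoss()(logits, target, weights)
out = SoftmaxFocalClassificationLossNew()(logits, target, weights)
print(ref.shape, torch.allclose(ref, out, atol=1e-6))  # expected: True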
collector-m/BtcDet
SoftmaxFocalClassificationLoss
false
15,062
[ "Apache-2.0" ]
108
80bee34f2f40931600f812a6edbcb27e51cb7ec3
https://github.com/collector-m/BtcDet/tree/80bee34f2f40931600f812a6edbcb27e51cb7ec3
AvgPool2d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/d7/cd7gnc5naykwrsak74dr3vwjj57pr5unvd4b3n6p42t7adgqlns4.py # Topologically Sorted Source Nodes: [outputs_mean], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # outputs_mean => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [2, 2], [2, 2], [1, 1]), kwargs = {}) triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 3) % 3 x0 = xindex % 3 x2 = (xindex // 9) x4 = xindex tmp0 = (-1) + (2*x1) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, 
tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + (2*x0) tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x1) + (16*x2)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = 2*x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x1) + (16*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tmp17 + tmp11 tmp19 = 2*x1 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x1) + (16*x2)), tmp23 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tmp24 + tmp18 tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + ((2*x0) + (8*x1) + (16*x2)), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tmp27 + tmp25 tmp29 = 1 + ((-2)*x0) + ((-2)*x1) + (((5) * ((5) <= (1 + (2*x0))) + (1 + (2*x0)) * ((1 + (2*x0)) < (5)))*((5) * ((5) <= (1 + (2*x1))) + (1 + (2*x1)) * ((1 + (2*x1)) < (5)))) + ((-2)*x0*((5) * ((5) <= (1 + (2*x1))) + (1 + (2*x1)) * ((1 + (2*x1)) < (5)))) + ((-2)*x1*((5) * ((5) <= (1 + (2*x0))) + (1 + (2*x0)) * ((1 + (2*x0)) < (5)))) + (4*x0*x1) + ((5) * ((5) <= (1 + (2*x0))) + (1 + (2*x0)) * ((1 + (2*x0)) < (5))) + ((5) * ((5) <= (1 + (2*x1))) + (1 + (2*x1)) * ((1 + (2*x1)) < (5))) tmp30 = tmp28 / tmp29 tl.store(out_ptr0 + (x4), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/q4/cq4cxxnd3uk4nokpwrrgnmn4dxunq3brjfsuakmfs6ygokbfba4d.py # Topologically Sorted Source Nodes: [outputs_variance, outputs_variance_1, truediv_1], Original ATen: [aten.avg_pool2d, aten.div] # Source node to ATen node mapping: # outputs_variance => avg_pool2d_1 # outputs_variance_1 => div # truediv_1 => div_1 # Graph fragment: # %avg_pool2d_1 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg1_1, [2, 2], [2, 2], [1, 1]), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%avg_pool2d_1, 16), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div, 16), kwargs = {}) triton_poi_fused_avg_pool2d_div_1 = async_compile.triton('triton_poi_fused_avg_pool2d_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_div_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_div_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 3) % 3 x0 = xindex % 3 x2 = (xindex // 9) x3 = xindex tmp0 = (-1) + (2*x1) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + (2*x0) tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x1) + (16*x2)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = 2*x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x1) + (16*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tmp17 + tmp11 tmp19 = 2*x1 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x1) + (16*x2)), tmp23 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tmp24 + tmp18 tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + ((2*x0) + (8*x1) + (16*x2)), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tmp27 + tmp25 tmp29 = 1 + ((-2)*x0) + ((-2)*x1) + (((5) * ((5) <= (1 + (2*x0))) + (1 + (2*x0)) * ((1 + (2*x0)) < (5)))*((5) * ((5) <= (1 + (2*x1))) + (1 + (2*x1)) * ((1 + (2*x1)) < (5)))) + ((-2)*x0*((5) * ((5) <= (1 + (2*x1))) + (1 + (2*x1)) * ((1 + (2*x1)) < (5)))) + ((-2)*x1*((5) * ((5) <= (1 + (2*x0))) + (1 + (2*x0)) * ((1 + (2*x0)) < (5)))) + (4*x0*x1) + ((5) * ((5) <= (1 + (2*x0))) + (1 + (2*x0)) * ((1 + (2*x0)) < (5))) + ((5) * ((5) <= (1 + (2*x1))) + (1 + (2*x1)) * ((1 + (2*x1)) < (5))) tmp30 = tmp28 / tmp29 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tmp33 = tmp32 * tmp31 tl.store(in_out_ptr0 + (x3), tmp33, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [outputs_mean], Original ATen: [aten.avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_0.run(arg0_1, buf0, 144, grid=grid(144), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [outputs_variance, outputs_variance_1, truediv_1], Original ATen: [aten.avg_pool2d, aten.div] triton_poi_fused_avg_pool2d_div_1.run(buf2, arg1_1, 144, grid=grid(144), stream=stream0) del arg1_1 return (buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


def keep_variance_fn(x):
    return x + 0.001


class AvgPool2d(nn.Module):

    def __init__(self, keep_variance_fn=None, kernel_size=2):
        super(AvgPool2d, self).__init__()
        self._keep_variance_fn = keep_variance_fn
        self.kernel_size = kernel_size

    def forward(self, inputs_mean, inputs_variance):
        outputs_mean = F.avg_pool2d(inputs_mean, self.kernel_size, stride=2,
            padding=1)
        outputs_variance = F.avg_pool2d(inputs_variance, self.kernel_size,
            stride=2, padding=1)
        # First division of the variance by H*W of the input plane (16 here).
        outputs_variance = outputs_variance / (inputs_mean.size(2) *
            inputs_mean.size(3))
        if self._keep_variance_fn is not None:
            outputs_variance = self._keep_variance_fn(outputs_variance)
        # Note: the variance is divided by H*W a second time on return, after
        # _keep_variance_fn has been applied; the fused Triton kernel mirrors
        # this as two multiplications by 0.0625.
        return outputs_mean, outputs_variance / (inputs_mean.shape[2] *
            inputs_mean.shape[3])


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 3 % 3 x0 = xindex % 3 x2 = xindex // 9 x4 = xindex tmp0 = -1 + 2 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + 2 * x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = 2 * x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tmp17 + tmp11 tmp19 = 2 * x1 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp23 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tmp24 + tmp18 tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tmp27 + tmp25 tmp29 = 1 + -2 * x0 + -2 * x1 + (5 * (5 <= 1 + 2 * x0) + (1 + 2 * x0) * (1 + 2 * x0 < 5)) * (5 * (5 <= 1 + 2 * x1) + (1 + 2 * x1) * (1 + 2 * x1 < 5)) + -2 * x0 * (5 * (5 <= 1 + 2 * x1) + (1 + 2 * x1) * (1 + 2 * x1 < 5)) + -2 * x1 * (5 * (5 <= 1 + 2 * x0) + (1 + 2 * x0) * (1 + 2 * x0 < 5)) + 4 * x0 * x1 + (5 * (5 <= 1 + 2 * x0) + (1 + 2 * x0) * (1 + 2 * x0 < 5)) + (5 * (5 <= 1 + 2 * x1) + (1 + 2 * x1) * (1 + 2 * x1 < 5) ) tmp30 = tmp28 / tmp29 tl.store(out_ptr0 + x4, tmp30, xmask) @triton.jit def triton_poi_fused_avg_pool2d_div_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 3 % 3 x0 = xindex % 3 x2 = xindex // 9 x3 = xindex tmp0 = -1 + 2 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + 2 * x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = 2 * x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tmp17 + tmp11 tmp19 = 2 * x1 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp23 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tmp24 + tmp18 tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp26 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tmp27 + tmp25 tmp29 = 1 + -2 * x0 + -2 * x1 + (5 * (5 <= 1 + 2 * x0) + (1 + 2 * x0) * (1 + 2 * x0 < 5)) * (5 * (5 <= 1 + 2 * x1) + (1 + 2 * x1) * (1 + 2 * x1 < 5)) + -2 * x0 * (5 * (5 <= 1 + 2 * x1) + (1 + 2 * 
x1) * (1 + 2 * x1 < 5)) + -2 * x1 * (5 * (5 <= 1 + 2 * x0) + (1 + 2 * x0) * (1 + 2 * x0 < 5)) + 4 * x0 * x1 + (5 * (5 <= 1 + 2 * x0) + (1 + 2 * x0) * (1 + 2 * x0 < 5)) + (5 * (5 <= 1 + 2 * x1) + (1 + 2 * x1) * (1 + 2 * x1 < 5) ) tmp30 = tmp28 / tmp29 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tmp33 = tmp32 * tmp31 tl.store(in_out_ptr0 + x3, tmp33, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(144)](arg0_1, buf0, 144, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) buf2 = buf1 del buf1 triton_poi_fused_avg_pool2d_div_1[grid(144)](buf2, arg1_1, 144, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 return buf0, buf2 def keep_variance_fn(x): return x + 0.001 class AvgPool2dNew(nn.Module): def __init__(self, keep_variance_fn=None, kernel_size=2): super(AvgPool2dNew, self).__init__() self._keep_variance_fn = keep_variance_fn self.kernel_size = kernel_size def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
collector-m/LiDAR-MOS
AvgPool2d
false
15063
[ "MIT" ]
268
7ccbb63b4ee7c40195b35dd0dddd71473fae25b1
https://github.com/collector-m/LiDAR-MOS/tree/7ccbb63b4ee7c40195b35dd0dddd71473fae25b1
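The AvgPool2d record above propagates a mean/variance pair through a 2x2 average pool, and the variance ends up divided by H*W twice (once in the function body, once on return), which the fused kernel reproduces as two multiplications by 0.0625. A minimal eager-mode sketch of that arithmetic, illustrative only and not part of the record, assuming the 4x4x4x4 shapes from get_inputs():

import torch
import torch.nn.functional as F


def avg_pool2d_mean_var(mean, var, kernel_size=2):
    # Same pooling configuration as the record: kernel 2, stride 2,
    # padding 1, so a 4x4 spatial plane becomes 3x3.
    out_mean = F.avg_pool2d(mean, kernel_size, stride=2, padding=1)
    out_var = F.avg_pool2d(var, kernel_size, stride=2, padding=1)
    hw = mean.size(2) * mean.size(3)  # 16 for the shapes in get_inputs()
    # Two divisions by hw, matching the kernel's tmp32 = tmp30 * 0.0625
    # followed by tmp33 = tmp32 * 0.0625.
    return out_mean, out_var / hw / hw


mean = torch.rand(4, 4, 4, 4)
var = torch.rand(4, 4, 4, 4)
out_mean, out_var = avg_pool2d_mean_var(mean, var)
print(out_mean.shape)  # torch.Size([4, 4, 3, 3])

This sketch runs on CPU and mirrors only the eager module; the compiled call() additionally requires CUDA and the exact strides checked by assert_size_stride.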
Conv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/3i/c3iahaec5pb6sirkwkcomrpurca6kdwlbapwy7tphk7f3x443klh.py # Topologically Sorted Source Nodes: [inputs], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # inputs => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%permute, [2, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 8], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 7 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = (-2) + x2 tmp1 = tl.full([1, 1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + ((-8) + y0 + (4*x2) + (16*y1)), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0) tl.store(out_ptr0 + (x2 + (7*y3)), tmp6, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/au/cau4pihcaptiev5y2ewn2o2nvrwhk7hogc72cofmmtbyv4rxc2oy.py # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7), (28, 7, 1), torch.float32) # Topologically Sorted Source Nodes: [inputs], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 16, 7, grid=grid(16, 7), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf1 = 
extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 return (reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), primals_2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class Conv(nn.Module): """ Convenience class that does padding and convolution for inputs in the format [batch_size, sequence length, hidden size] """ def __init__(self, input_size, output_size, kernel_size, pad_type): """ Parameters: input_size: Input feature size output_size: Output feature size kernel_size: Kernel width pad_type: left -> pad on the left side (to mask future data), both -> pad on both sides """ super(Conv, self).__init__() padding = (kernel_size - 1, 0) if pad_type == 'left' else ( kernel_size // 2, (kernel_size - 1) // 2) self.pad = nn.ConstantPad1d(padding, 0) self.conv = nn.Conv1d(input_size, output_size, kernel_size= kernel_size, padding=0) def forward(self, inputs): inputs = self.pad(inputs.permute(0, 2, 1)) outputs = self.conv(inputs).permute(0, 2, 1) return outputs def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4, 'kernel_size': 4, 'pad_type': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 7 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = -2 + x2 tmp1 = tl.full([1, 1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-8 + y0 + 4 * x2 + 16 * y1), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0) tl.store(out_ptr0 + (x2 + 7 * y3), tmp6, xmask & ymask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7), (28, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(16, 7)](primals_1, buf0, 16, 7, XBLOCK=8, YBLOCK=16, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 return reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), primals_2, buf0 class ConvNew(nn.Module): """ Convenience class that does padding and convolution for inputs in the format [batch_size, sequence length, hidden size] """ def __init__(self, input_size, output_size, kernel_size, pad_type): """ Parameters: input_size: Input feature size output_size: Output feature size kernel_size: Kernel width pad_type: left -> pad on the left side (to mask future data), both -> pad on both sides """ super(ConvNew, self).__init__() padding = (kernel_size - 1, 0) if pad_type == 'left' else ( kernel_size // 2, (kernel_size - 1) // 2) self.pad = nn.ConstantPad1d(padding, 0) self.conv = nn.Conv1d(input_size, output_size, kernel_size= kernel_size, padding=0) def forward(self, input_0): primals_1 = self.conv.weight primals_3 = self.conv.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
colincen/coach
Conv
false
15064
[ "MIT" ]
72
2b1b543851cc7ba359f48dac6a5c72f1ced9b530
https://github.com/colincen/coach/tree/2b1b543851cc7ba359f48dac6a5c72f1ced9b530
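Because get_init_inputs() in the Conv record above passes pad_type=4 rather than 'left', the module takes the pad-both-sides branch: padding = (kernel_size // 2, (kernel_size - 1) // 2) = (2, 1), which is exactly the [2, 1] constant_pad_nd the compiled graph applies before the external convolution. A minimal sketch of that padding arithmetic, illustrative only and assuming the shapes from get_inputs():

import torch
import torch.nn as nn

k = 4
# (k // 2, (k - 1) // 2) == (2, 1): the same asymmetric pad as the graph.
pad = nn.ConstantPad1d((k // 2, (k - 1) // 2), 0)
conv = nn.Conv1d(4, 4, kernel_size=k, padding=0)

x = torch.rand(4, 4, 4)  # [batch, sequence length, hidden size]
# Permute to [batch, hidden, seq] for Conv1d, pad length 4 up to 7,
# convolve back down to 4, then permute back: sequence length is preserved.
y = conv(pad(x.permute(0, 2, 1))).permute(0, 2, 1)
print(y.shape)  # torch.Size([4, 4, 4])

Both the eager permute and the compiled reinterpret_tensor with strides (16, 1, 4) express the final transpose as a stride change rather than a copy.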
FCN8VGG16
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/kn/cknyjwkwufnzzf4ya3scui55ownkmt5cdh3hggzwsfe3ch5fshzm.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 12 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask) ''', device_str='cuda') 
# kernel path: runs/run_shard_0/inductor_cache/5t/c5ta5b5nw4dp65565mg3k6wfbphtogtvx5v75up5yeibgiwkacek.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xq/cxq75w43anllid5ys7ss3yyizuoeph3vvaqlvm5lo434hrywtyle.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4096 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/nw/cnwm6ljuusoqjcwr2jdx6p2ue7ldghxjdr3oe62stiuqhsboiczy.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/32/c32xiwptfqtyhbnde262mvq5tzywzo6zquurttkv7sztqnze6yni.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', ''' import triton import 
triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16384 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/jj/cjjz4tpbucpuc3faa2ky32crfwhb5fbnssd6o2yfkgdcjg2acfmo.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 32768 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/tg/ctgdsxjd3rciejxtjvi3y2w5fmmggh5lm3mivuygvkdzeb3zulmc.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_6 = async_compile.triton('triton_poi_fused_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 65536 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/e7/ce7jqsdrj5poslb2hpufqd2wdux5xiab5n2auqal3ztzvkzrmnzl.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_7 = async_compile.triton('triton_poi_fused_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, 
math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 131072 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ks/ckso6iiq5yfqfxmx7ilr6ufrmz6mlkiy75pexzhyf3ierq4pu3zl.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_8 = async_compile.triton('triton_poi_fused_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 262144 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = (yindex // 512) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/4i/c4islqctnux7quywor4ljttjc6krtgvecvzfsjd2pvp4i6z2bufb.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_9 = async_compile.triton('triton_poi_fused_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152, 64], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 2097152 xnumel = 49 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = (yindex // 512) tmp0 = tl.load(in_ptr0 + (x2 + (49*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (512*x2) + (25088*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/yq/cyqfo4aqihg6ahstqrepba4djklktcvcpe3dmlnrnyivjlyz3rzg.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_10 = async_compile.triton('triton_poi_fused_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask) tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/wo/cwocn4wtbj2i5fzcqix5mvg4tmm4nisnn5a5ajlq2vbodwd3x6o6.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_11 = async_compile.triton('triton_poi_fused_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 256], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_11(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, 
:] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (256*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (4*x2) + (1024*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/cp/ccp4ojszpovg6la422usrylat5keq24pc6vzzggj6xjcgfrzhcxg.py # Topologically Sorted Source Nodes: [conv2d, conv1_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv1_1 => relu # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [100, 100], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_12 = async_compile.triton('triton_poi_fused_convolution_relu_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[33554432], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 17572864 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xz/cxzlno4gfs3kc72eat6xpiu6xn5dr6yvc2opm5zd7zvxg7ggvnie.py # Topologically Sorted Source Nodes: [pool1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # pool1 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), 
kwargs = {}) triton_poi_fused_max_pool2d_with_indices_13 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4393216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = (xindex // 64) % 131 x2 = (xindex // 8384) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (33536*x2)), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (33536*x2)), xmask) tmp3 = tl.load(in_ptr0 + (16768 + x0 + (128*x1) + (33536*x2)), xmask) tmp5 = tl.load(in_ptr0 + (16832 + x0 + (128*x1) + (33536*x2)), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, xmask) tl.store(out_ptr1 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7f/c7f3npybiepto4pskonp4rrflyi2drrmu7gy7zqiboknduhfvsfv.py # Topologically Sorted Source Nodes: [conv2d_2, conv2_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2_1 => relu_2 # conv2d_2 => convolution_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_14 = async_compile.triton('triton_poi_fused_convolution_relu_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16777216], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8786432 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6v/c6vh6fohzdliwr7kxmm7qx6hmyvozyr5aky4loctm53mwljdb23x.py # Topologically Sorted Source Nodes: [pool2], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # pool2 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_15 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 2230272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = (xindex // 8448) % 66 x1 = (xindex // 128) % 66 x0 = xindex % 128 x3 = (xindex // 557568) x6 = xindex tmp0 = 2*x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 131, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = 2*x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (x0 + (256*x1) + (33536*x2) + (2196608*x3)), tmp10, other=float("-inf")) tmp12 = 1 + (2*x1) tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (128 + x0 + (256*x1) + (33536*x2) + (2196608*x3)), tmp16, other=float("-inf")) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + (2*x2) tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + (16768 + x0 + (256*x1) + (33536*x2) + (2196608*x3)), tmp23, other=float("-inf")) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + (16896 + x0 + (256*x1) + (33536*x2) + (2196608*x3)), tmp26, other=float("-inf")) tmp28 = triton_helpers.maximum(tmp27, tmp25) tmp29 = tmp17 > tmp11 tmp30 = tl.full([1], 1, tl.int8) tmp31 = tl.full([1], 0, tl.int8) tmp32 = tl.where(tmp29, tmp30, tmp31) tmp33 = tmp24 > tmp18 tmp34 = tl.full([1], 2, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp27 > tmp25 tmp37 = tl.full([1], 3, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tl.store(out_ptr0 + (x6), tmp28, None) tl.store(out_ptr1 + (x6), tmp38, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/t7/ct7w7cw5mqsq4ea34flessxkmsh4ej3cv3n6smkzfa27walntfeh.py # Topologically Sorted Source Nodes: [conv2d_4, conv3_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_4 => convolution_4 # conv3_1 => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) triton_poi_fused_convolution_relu_16 = async_compile.triton('triton_poi_fused_convolution_relu_16', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4460544 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/d2/cd2lbbktrlinq53ce3g2bvhqixfzcynec4yh52pbb5no5nvmictc.py # Topologically Sorted Source Nodes: [pool3], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # pool3 => getitem_4, getitem_5 # Graph fragment: # %getitem_4 : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_17 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_17', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_17(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1115136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex 
% 256 x1 = (xindex // 256) % 33 x2 = (xindex // 8448) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (33792*x2)), xmask) tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (33792*x2)), xmask) tmp3 = tl.load(in_ptr0 + (16896 + x0 + (512*x1) + (33792*x2)), xmask) tmp5 = tl.load(in_ptr0 + (17152 + x0 + (512*x1) + (33792*x2)), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, xmask) tl.store(out_ptr1 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ka/ckakcvtkfyvarj6bygevbxelipfntuuwg3fl65cadqwzpxizcl7j.py # Topologically Sorted Source Nodes: [conv2d_7, conv4_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_7 => convolution_7 # conv4_1 => relu_7 # Graph fragment: # %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {}) triton_poi_fused_convolution_relu_18 = async_compile.triton('triton_poi_fused_convolution_relu_18', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_18', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2230272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/7s/c7sk5lcaepj3olmug655q7hxtrgrqziu4e5f27yx7ka6vbwluk3s.py # Topologically Sorted Source Nodes: [pool4], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # pool4 => getitem_6, getitem_7 # Graph fragment: # %getitem_6 : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {}) # %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_19 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_19', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_19(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 591872 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = (xindex // 8704) % 17 x1 = (xindex // 512) % 17 x0 = xindex % 512 x3 = (xindex // 147968) x6 = xindex tmp0 = 2*x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 33, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = 2*x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (x0 + (1024*x1) + (33792*x2) + (557568*x3)), tmp10, other=float("-inf")) tmp12 = 1 + (2*x1) tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (512 + x0 + (1024*x1) + (33792*x2) + (557568*x3)), tmp16, other=float("-inf")) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + (2*x2) tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + (16896 + x0 + (1024*x1) + (33792*x2) + (557568*x3)), tmp23, other=float("-inf")) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + (17408 + x0 + (1024*x1) + (33792*x2) + (557568*x3)), tmp26, other=float("-inf")) tmp28 = triton_helpers.maximum(tmp27, tmp25) tmp29 = tmp17 > tmp11 tmp30 = tl.full([1], 1, tl.int8) tmp31 = tl.full([1], 0, tl.int8) tmp32 = 
tl.where(tmp29, tmp30, tmp31) tmp33 = tmp24 > tmp18 tmp34 = tl.full([1], 2, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp27 > tmp25 tmp37 = tl.full([1], 3, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tl.store(out_ptr0 + (x6), tmp28, None) tl.store(out_ptr1 + (x6), tmp38, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/hx/chxwibttvyopiavs7og2i665mglcnendceewlhrfkqxdqprsj4r4.py # Topologically Sorted Source Nodes: [conv2d_10, conv5_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_10 => convolution_10 # conv5_1 => relu_10 # Graph fragment: # %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_10 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_10,), kwargs = {}) triton_poi_fused_convolution_relu_20 = async_compile.triton('triton_poi_fused_convolution_relu_20', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_20', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 591872 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7u/c7usmy4jmwhqvoypqaj7minodxefdgyuup7zbx3o5sdav22vppzn.py # Topologically Sorted Source Nodes: [pool5], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # pool5 => getitem_8, getitem_9 # Graph fragment: # %getitem_8 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_4, 0), kwargs = {}) # %getitem_9 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_4, 1), kwargs = {}) 
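# Reference sketch (illustration only; not used by the generated code): the
# fused max-pool kernels in this file, including the pool5 kernel defined
# next, emit two buffers per 2x2 / stride-2 window -- the max value and an
# int8 code in {0, 1, 2, 3} recording which tap (TL, TR, BL, BR) won, which
# the max-pool backward consumes. A minimal eager-mode equivalent, assuming
# the ceil_mode=True pooling of the source module's nn.MaxPool2d:
def _reference_max_pool2x2(x):
    import torch.nn.functional as F
    # Pad the ragged edge with -inf so a ceil-mode window hanging over the
    # boundary never selects a padded element, mirroring the kernels'
    # masked loads with other=float("-inf").
    h, w = x.shape[-2:]
    xp = F.pad(x, (0, w % 2, 0, h % 2), value=float('-inf'))
    # Stack the four window taps and reduce: the values match out_ptr0, and
    # the argmax over dim 0 matches the int8 codes written to out_ptr1
    # (ties resolve to the lowest tap index in both formulations).
    taps = torch.stack([xp[..., 0::2, 0::2], xp[..., 0::2, 1::2],
                        xp[..., 1::2, 0::2], xp[..., 1::2, 1::2]])
    vals, idx = taps.max(dim=0)
    return vals, idx.to(torch.int8)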
triton_poi_fused_max_pool2d_with_indices_21 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_21', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_21(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 165888 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = (xindex // 4608) % 9 x1 = (xindex // 512) % 9 x0 = xindex % 512 x3 = (xindex // 41472) x6 = xindex tmp0 = 2*x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 17, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = 2*x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (x0 + (1024*x1) + (17408*x2) + (147968*x3)), tmp10, other=float("-inf")) tmp12 = 1 + (2*x1) tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (512 + x0 + (1024*x1) + (17408*x2) + (147968*x3)), tmp16, other=float("-inf")) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + (2*x2) tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + (8704 + x0 + (1024*x1) + (17408*x2) + (147968*x3)), tmp23, other=float("-inf")) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + (9216 + x0 + (1024*x1) + (17408*x2) + (147968*x3)), tmp26, other=float("-inf")) tmp28 = triton_helpers.maximum(tmp27, tmp25) tmp29 = tmp17 > tmp11 tmp30 = tl.full([1], 1, tl.int8) tmp31 = tl.full([1], 0, tl.int8) tmp32 = tl.where(tmp29, tmp30, tmp31) tmp33 = tmp24 > tmp18 tmp34 = tl.full([1], 2, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp27 > tmp25 tmp37 = tl.full([1], 3, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tl.store(out_ptr0 + (x6), tmp28, None) tl.store(out_ptr1 + (x6), tmp38, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/vf/cvf6cbxucccgtdj2kshu7xqtxofuulnihdlcxxkpyduukdvzdwro.py # Topologically Sorted Source Nodes: [conv2d_13, relu_13], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node 
mapping: # conv2d_13 => convolution_13 # relu_13 => relu_13 # Graph fragment: # %convolution_13 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_8, %primals_28, %primals_29, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_13 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_13,), kwargs = {}) triton_poi_fused_convolution_relu_22 = async_compile.triton('triton_poi_fused_convolution_relu_22', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_22', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_22(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 147456 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 4096 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/4u/c4uzwwoctqs5xcwfvqvawlvooutlhyf4mwe2dn7xspyey3sknh4q.py # Topologically Sorted Source Nodes: [scores], Original ATen: [aten.convolution] # Source node to ATen node mapping: # scores => convolution_15 # Graph fragment: # %convolution_15 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_14, %primals_32, %primals_33, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_23 = async_compile.triton('triton_poi_fused_convolution_23', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_23', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_23(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/q2/cq2v772nljvrfcwedtl2znbzkvxrr36gpbjeuah4dniwdw2g3ot4.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_4, %convolution_16), kwargs = {}) triton_poi_fused_add_24 = async_compile.triton('triton_poi_fused_add_24', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_24', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_24(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 32) % 8 x3 = (xindex // 256) x4 = xindex % 32 x0 = xindex % 4 x5 = xindex tmp0 = tl.load(in_ptr0 + (360 + x4 + (68*x2) + (1156*x3)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr0 + (x5), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x5), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kf/ckfadyhdewmvgnf754zvemqhwrysieq26nisn2corsl3ycvba6xa.py # Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add] # Source node to ATen node mapping: # add_1 => add_1 # Graph fragment: # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_8, %convolution_18), kwargs = {}) triton_poi_fused_add_25 = async_compile.triton('triton_poi_fused_add_25', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_25', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_25(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 5184 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 72) % 18 x3 = (xindex // 1296) x4 = xindex % 72 x0 = xindex % 4 x5 = xindex tmp0 = tl.load(in_ptr0 + (1224 + x4 + (132*x2) + (4356*x3)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr0 + (x5), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x5), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/nk/cnkppvxhwbnowlfn4o4gbk7fiwqws3ynxvzlxr5jabqy2xx5cbg6.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_12,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_26 = async_compile.triton('triton_poi_fused_clone_26', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4096], 
tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_26', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_26(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex % 64 x3 = (xindex // 64) y0 = yindex % 4 y1 = (yindex // 4) x5 = xindex y4 = yindex tmp0 = tl.load(in_ptr0 + (18972 + y0 + (4*x2) + (608*x3) + (92416*y1)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x5 + (4096*y4)), tmp0, ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40 = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_3, (64, ), (1, )) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128, ), (1, )) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256, ), (1, )) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256, ), (1, )) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256, ), (1, )) assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (512, ), (1, )) assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_19, (512, ), (1, )) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512, ), (1, )) assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (512, ), (1, )) 
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512, ), (1, )) assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (512, ), (1, )) assert_size_stride(primals_28, (4096, 512, 7, 7), (25088, 49, 7, 1)) assert_size_stride(primals_29, (4096, ), (1, )) assert_size_stride(primals_30, (4096, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_31, (4096, ), (1, )) assert_size_stride(primals_32, (4, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_33, (4, ), (1, )) assert_size_stride(primals_34, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_35, (4, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_36, (4, ), (1, )) assert_size_stride(primals_37, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_38, (4, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_39, (4, ), (1, )) assert_size_stride(primals_40, (4, 4, 16, 16), (1024, 256, 16, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 12, 4096, grid=grid(12, 4096), stream=stream0) del primals_1 buf1 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_2, buf1, 192, 9, grid=grid(192, 9), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_4, buf2, 4096, 9, grid=grid(4096, 9), stream=stream0) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_6, buf3, 8192, 9, grid=grid(8192, 9), stream=stream0) del primals_6 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_8, buf4, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_8 buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_5.run(primals_10, buf5, 32768, 9, grid=grid(32768, 9), stream=stream0) del primals_10 buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_6.run(primals_12, buf6, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_12 buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_6.run(primals_14, buf7, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_14 buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_7.run(primals_16, buf8, 131072, 9, grid=grid(131072, 9), stream=stream0) del primals_16 buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_18, buf9, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_18 buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_20, buf10, 262144, 9, grid=grid(262144, 9), stream=stream0) del 
primals_20 buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_22, buf11, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_22 buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_24, buf12, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_24 buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_26, buf13, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_26 buf14 = empty_strided_cuda((4096, 512, 7, 7), (25088, 1, 3584, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_9.run(primals_28, buf14, 2097152, 49, grid=grid(2097152, 49), stream=stream0) del primals_28 buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_10.run(primals_34, buf15, 16, 16, grid=grid(16, 16), stream=stream0) del primals_34 buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_10.run(primals_37, buf16, 16, 16, grid=grid(16, 16), stream=stream0) del primals_37 buf17 = empty_strided_cuda((4, 4, 16, 16), (1024, 1, 64, 4), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_11.run(primals_40, buf17, 16, 256, grid=grid(16, 256), stream=stream0) del primals_40 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf18 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(100, 100), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 64, 262, 262), (4393216, 1, 16768, 64)) buf19 = buf18; del buf18 # reuse # Topologically Sorted Source Nodes: [conv2d, conv1_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_12.run(buf19, primals_3, 17572864, grid=grid(17572864), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf20 = extern_kernels.convolution(buf19, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 64, 262, 262), (4393216, 1, 16768, 64)) buf21 = buf20; del buf20 # reuse # Topologically Sorted Source Nodes: [conv2d_1, conv1_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_12.run(buf21, primals_5, 17572864, grid=grid(17572864), stream=stream0) del primals_5 buf22 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64), torch.float32) buf23 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64), torch.int8) # Topologically Sorted Source Nodes: [pool1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_13.run(buf21, buf22, buf23, 4393216, grid=grid(4393216), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf22, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 128, 131, 131), (2196608, 1, 16768, 128)) buf25 = buf24; del buf24 # reuse # Topologically Sorted Source Nodes: [conv2d_2, conv2_1], Original ATen: 
[aten.convolution, aten.relu] triton_poi_fused_convolution_relu_14.run(buf25, primals_7, 8786432, grid=grid(8786432), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf26 = extern_kernels.convolution(buf25, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 128, 131, 131), (2196608, 1, 16768, 128)) buf27 = buf26; del buf26 # reuse # Topologically Sorted Source Nodes: [conv2d_3, conv2_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_14.run(buf27, primals_9, 8786432, grid=grid(8786432), stream=stream0) del primals_9 buf28 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128), torch.float32) buf29 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128), torch.int8) # Topologically Sorted Source Nodes: [pool2], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_15.run(buf27, buf28, buf29, 2230272, grid=grid(2230272), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf30 = extern_kernels.convolution(buf28, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 256, 66, 66), (1115136, 1, 16896, 256)) buf31 = buf30; del buf30 # reuse # Topologically Sorted Source Nodes: [conv2d_4, conv3_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_16.run(buf31, primals_11, 4460544, grid=grid(4460544), stream=stream0) del primals_11 # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf32 = extern_kernels.convolution(buf31, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 256, 66, 66), (1115136, 1, 16896, 256)) buf33 = buf32; del buf32 # reuse # Topologically Sorted Source Nodes: [conv2d_5, conv3_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_16.run(buf33, primals_13, 4460544, grid=grid(4460544), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf34 = extern_kernels.convolution(buf33, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 256, 66, 66), (1115136, 1, 16896, 256)) buf35 = buf34; del buf34 # reuse # Topologically Sorted Source Nodes: [conv2d_6, conv3_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_16.run(buf35, primals_15, 4460544, grid=grid(4460544), stream=stream0) del primals_15 buf36 = empty_strided_cuda((4, 256, 33, 33), (278784, 1, 8448, 256), torch.float32) buf37 = empty_strided_cuda((4, 256, 33, 33), (278784, 1, 8448, 256), torch.int8) # Topologically Sorted Source Nodes: [pool3], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_17.run(buf35, buf36, buf37, 1115136, grid=grid(1115136), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution] buf38 = extern_kernels.convolution(buf36, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 512, 33, 33), (557568, 1, 16896, 512)) buf39 = buf38; del buf38 # reuse # Topologically Sorted Source Nodes: [conv2d_7, 
conv4_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_18.run(buf39, primals_17, 2230272, grid=grid(2230272), stream=stream0) del primals_17 # Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution] buf40 = extern_kernels.convolution(buf39, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 512, 33, 33), (557568, 1, 16896, 512)) buf41 = buf40; del buf40 # reuse # Topologically Sorted Source Nodes: [conv2d_8, conv4_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_18.run(buf41, primals_19, 2230272, grid=grid(2230272), stream=stream0) del primals_19 # Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution] buf42 = extern_kernels.convolution(buf41, buf10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 512, 33, 33), (557568, 1, 16896, 512)) buf43 = buf42; del buf42 # reuse # Topologically Sorted Source Nodes: [conv2d_9, conv4_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_18.run(buf43, primals_21, 2230272, grid=grid(2230272), stream=stream0) del primals_21 buf44 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512), torch.float32) buf45 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512), torch.int8) # Topologically Sorted Source Nodes: [pool4], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_19.run(buf43, buf44, buf45, 591872, grid=grid(591872), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution] buf46 = extern_kernels.convolution(buf44, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf46, (4, 512, 17, 17), (147968, 1, 8704, 512)) buf47 = buf46; del buf46 # reuse # Topologically Sorted Source Nodes: [conv2d_10, conv5_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_20.run(buf47, primals_23, 591872, grid=grid(591872), stream=stream0) del primals_23 # Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution] buf48 = extern_kernels.convolution(buf47, buf12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 512, 17, 17), (147968, 1, 8704, 512)) buf49 = buf48; del buf48 # reuse # Topologically Sorted Source Nodes: [conv2d_11, conv5_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_20.run(buf49, primals_25, 591872, grid=grid(591872), stream=stream0) del primals_25 # Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution] buf50 = extern_kernels.convolution(buf49, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf50, (4, 512, 17, 17), (147968, 1, 8704, 512)) buf51 = buf50; del buf50 # reuse # Topologically Sorted Source Nodes: [conv2d_12, conv5_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_20.run(buf51, primals_27, 591872, grid=grid(591872), stream=stream0) del primals_27 buf52 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512), torch.float32) buf53 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512), torch.int8) # Topologically 
Sorted Source Nodes: [pool5], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_21.run(buf51, buf52, buf53, 165888, grid=grid(165888), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution] buf54 = extern_kernels.convolution(buf52, buf14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf54, (4, 4096, 3, 3), (36864, 1, 12288, 4096)) buf55 = buf54; del buf54 # reuse # Topologically Sorted Source Nodes: [conv2d_13, relu_13], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_22.run(buf55, primals_29, 147456, grid=grid(147456), stream=stream0) del primals_29 # Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution] buf56 = extern_kernels.convolution(buf55, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf56, (4, 4096, 3, 3), (36864, 1, 12288, 4096)) buf57 = buf56; del buf56 # reuse # Topologically Sorted Source Nodes: [conv2d_14, relu_14], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_22.run(buf57, primals_31, 147456, grid=grid(147456), stream=stream0) del primals_31 # Topologically Sorted Source Nodes: [scores], Original ATen: [aten.convolution] buf58 = extern_kernels.convolution(buf57, primals_32, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf58, (4, 4, 3, 3), (36, 1, 12, 4)) buf59 = buf58; del buf58 # reuse # Topologically Sorted Source Nodes: [scores], Original ATen: [aten.convolution] triton_poi_fused_convolution_23.run(buf59, primals_33, 144, grid=grid(144), stream=stream0) del primals_33 # Topologically Sorted Source Nodes: [upscore2], Original ATen: [aten.convolution] buf60 = extern_kernels.convolution(buf59, buf15, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf60, (4, 4, 8, 8), (256, 1, 32, 4)) # Topologically Sorted Source Nodes: [score_pool4], Original ATen: [aten.convolution] buf61 = extern_kernels.convolution(buf44, primals_35, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf61, (4, 4, 17, 17), (1156, 1, 68, 4)) buf62 = buf60; del buf60 # reuse # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] triton_poi_fused_add_24.run(buf62, buf61, primals_36, 1024, grid=grid(1024), stream=stream0) del buf61 del primals_36 # Topologically Sorted Source Nodes: [upscore_pool4], Original ATen: [aten.convolution] buf63 = extern_kernels.convolution(buf62, buf16, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf63, (4, 4, 18, 18), (1296, 1, 72, 4)) # Topologically Sorted Source Nodes: [score_pool3], Original ATen: [aten.convolution] buf64 = extern_kernels.convolution(buf36, primals_38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf64, (4, 4, 33, 33), (4356, 1, 132, 4)) buf65 = buf63; del buf63 # reuse # Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add] triton_poi_fused_add_25.run(buf65, buf64, primals_39, 5184, grid=grid(5184), stream=stream0) del buf64 del primals_39 # Topologically Sorted Source Nodes: 
[output], Original ATen: [aten.convolution] buf66 = extern_kernels.convolution(buf65, buf17, stride=(8, 8), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf66, (4, 4, 152, 152), (92416, 1, 608, 4)) buf67 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] triton_poi_fused_clone_26.run(buf66, buf67, 16, 4096, grid=grid(16, 4096), stream=stream0) del buf66 return (buf67, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, buf12, buf13, buf14, primals_30, primals_32, buf15, primals_35, buf16, primals_38, buf17, buf19, buf21, buf22, buf23, buf25, buf27, buf28, buf29, buf31, buf33, buf35, buf36, buf37, buf39, buf41, buf43, buf44, buf45, buf47, buf49, buf51, buf52, buf53, buf55, buf57, buf59, buf62, buf65, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((4096, 512, 7, 7), (25088, 49, 7, 1), device='cuda:0', 
dtype=torch.float32) primals_29 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((4096, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_31 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_32 = rand_strided((4, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_33 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_34 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_35 = rand_strided((4, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_36 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_37 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_38 = rand_strided((4, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_39 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_40 = rand_strided((4, 4, 16, 16), (1024, 256, 16, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
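# Shape walk-through (illustration only): the call() graph above is the
# compiled forward of the FCN8VGG16 module reproduced below. For the
# 4x3x64x64 benchmark input, the spatial sizes pinned by the
# assert_size_stride checks are:
#   conv1_1 (3x3, pad=100):     64 -> 262
#   pool1..pool5 (ceil_mode):   262 -> 131 -> 66 -> 33 -> 17 -> 9
#   fc6 (7x7, no padding):      9 -> 3
#   upscore2 (k=4, s=2):        3 -> 8    (pool4 scores cropped at offset 5)
#   upscore_pool4 (k=4, s=2):   8 -> 18   (pool3 scores cropped at offset 9)
#   upscore8 (k=16, s=8):       18 -> 152 (final crop at offset 31 -> 64)
# A hypothetical helper reproducing that arithmetic (not used by call()):
def _fcn8_spatial_sizes(h):
    pool = lambda s: (s + 1) // 2            # MaxPool2d(2, 2, ceil_mode=True)
    up = lambda s, k, st: (s - 1) * st + k   # ConvTranspose2d, no padding
    s = h + 2 * 100 - 2                      # conv1_1: 3x3 conv, padding=100
    sizes = [s]
    for _ in range(5):                       # pool1 .. pool5
        s = pool(s)
        sizes.append(s)
    s -= 6                                   # fc6: 7x7 conv, no padding
    sizes.append(s)
    for k, st in ((4, 2), (4, 2), (16, 8)):  # upscore2, upscore_pool4, upscore8
        s = up(s, k, st)
        sizes.append(s)
    return sizes                             # h=64: [262, 131, 66, 33, 17, 9, 3, 8, 18, 152]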
import torch
import numpy as np
from torch import nn
import torch.utils.model_zoo as model_zoo


def conv3x3(in_planes, out_planes, stride=1, padding=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=(3, 3), stride=(stride, stride), padding=(padding, padding))


def get_upsampling_weight(in_channels, out_channels, kernel_size):
    """Make a 2D bilinear kernel suitable for upsampling"""
    factor = (kernel_size + 1) // 2
    if kernel_size % 2 == 1:
        center = factor - 1
    else:
        center = factor - 0.5
    og = np.ogrid[:kernel_size, :kernel_size]
    filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
    weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype=np.float64)
    weight[range(in_channels), range(out_channels), :, :] = filt
    return torch.from_numpy(weight).float()


class FCN8VGG16(nn.Module):

    def __init__(self, n_classes):
        super().__init__()
        self.n_classes = n_classes
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.relu = nn.ReLU(inplace=True)
        self.conv1_1 = conv3x3(3, 64, stride=1, padding=100)
        self.conv1_2 = conv3x3(64, 64)
        self.conv2_1 = conv3x3(64, 128)
        self.conv2_2 = conv3x3(128, 128)
        self.conv3_1 = conv3x3(128, 256)
        self.conv3_2 = conv3x3(256, 256)
        self.conv3_3 = conv3x3(256, 256)
        self.conv4_1 = conv3x3(256, 512)
        self.conv4_2 = conv3x3(512, 512)
        self.conv4_3 = conv3x3(512, 512)
        self.conv5_1 = conv3x3(512, 512)
        self.conv5_2 = conv3x3(512, 512)
        self.conv5_3 = conv3x3(512, 512)
        self.fc6 = nn.Conv2d(512, 4096, kernel_size=7, stride=1, padding=0)
        self.dropout_f6 = nn.Dropout()
        self.fc7 = nn.Conv2d(4096, 4096, kernel_size=1, stride=1, padding=0)
        self.dropout_f7 = nn.Dropout()
        self.scoring_layer = nn.Conv2d(4096, self.n_classes, kernel_size=1, stride=1, padding=0)
        self.upscore2 = nn.ConvTranspose2d(self.n_classes, self.n_classes, kernel_size=4, stride=2, bias=False)
        self.upscore_pool4 = nn.ConvTranspose2d(self.n_classes, self.n_classes, kernel_size=4, stride=2, bias=False)
        self.upscore8 = nn.ConvTranspose2d(self.n_classes, self.n_classes, kernel_size=16, stride=8, bias=False)
        self.scoring_layer.weight.data.zero_()
        self.scoring_layer.bias.data.zero_()
        self.score_pool3 = nn.Conv2d(256, self.n_classes, kernel_size=1)
        self.score_pool4 = nn.Conv2d(512, self.n_classes, kernel_size=1)
        self.score_pool3.weight.data.zero_()
        self.score_pool3.bias.data.zero_()
        self.score_pool4.weight.data.zero_()
        self.score_pool4.bias.data.zero_()
        self.upscore2.weight.data.copy_(get_upsampling_weight(self.n_classes, self.n_classes, 4))
        self.upscore_pool4.weight.data.copy_(get_upsampling_weight(self.n_classes, self.n_classes, 4))
        self.upscore8.weight.data.copy_(get_upsampling_weight(self.n_classes, self.n_classes, 16))
        pth_url = 'https://download.pytorch.org/models/vgg16-397923af.pth'
        state_dict = model_zoo.load_url(pth_url)
        layer_names = [layer_name for layer_name in state_dict]
        counter = 0
        for p in self.parameters():
            if counter < 26:
                p.data = state_dict[layer_names[counter]]
            elif counter == 26:
                p.data = state_dict[layer_names[counter]].view(4096, 512, 7, 7)
            elif counter == 27:
                p.data = state_dict[layer_names[counter]]
            elif counter == 28:
                p.data = state_dict[layer_names[counter]].view(4096, 4096, 1, 1)
            elif counter == 29:
                p.data = state_dict[layer_names[counter]]
            counter += 1

    def forward(self, x):
        _n, _c, h, w = x.size()
        conv1_1 = self.relu(self.conv1_1(x))
        conv1_2 = self.relu(self.conv1_2(conv1_1))
        pool1 = self.pool(conv1_2)
        conv2_1 = self.relu(self.conv2_1(pool1))
        conv2_2 = self.relu(self.conv2_2(conv2_1))
        pool2 = self.pool(conv2_2)
        conv3_1 = self.relu(self.conv3_1(pool2))
        conv3_2 = self.relu(self.conv3_2(conv3_1))
        conv3_3 = self.relu(self.conv3_3(conv3_2))
        pool3 = self.pool(conv3_3)
        conv4_1 = self.relu(self.conv4_1(pool3))
        conv4_2 = self.relu(self.conv4_2(conv4_1))
        conv4_3 = self.relu(self.conv4_3(conv4_2))
        pool4 = self.pool(conv4_3)
        conv5_1 = self.relu(self.conv5_1(pool4))
        conv5_2 = self.relu(self.conv5_2(conv5_1))
        conv5_3 = self.relu(self.conv5_3(conv5_2))
        pool5 = self.pool(conv5_3)
        fc6 = self.dropout_f6(self.relu(self.fc6(pool5)))
        fc7 = self.dropout_f7(self.relu(self.fc7(fc6)))
        scores = self.scoring_layer(fc7)
        upscore2 = self.upscore2(scores)
        score_pool4 = self.score_pool4(pool4)
        score_pool4c = score_pool4[:, :, 5:5 + upscore2.size(2), 5:5 + upscore2.size(3)]
        upscore_pool4 = self.upscore_pool4(score_pool4c + upscore2)
        score_pool3 = self.score_pool3(pool3)
        score_pool3c = score_pool3[:, :, 9:9 + upscore_pool4.size(2), 9:9 + upscore_pool4.size(3)]
        output = self.upscore8(score_pool3c + upscore_pool4)
        return output[:, :, 31:31 + h, 31:31 + w].contiguous()


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {'n_classes': 4}]
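The padding=100 trick and the hard-coded crop offsets (5, 9, 31) follow from size arithmetic alone: every 3x3 conv except conv1_1 preserves spatial size, so only conv1_1, the ceil-mode pools, fc6, and the transposed convs change it. A minimal sketch (standalone; the function name is illustrative, not from the repo) that reproduces the spatial sizes seen in the compiled buffers of this record for a 64x64 input:

import math

def fcn8_sizes(h=64):
    s = h + 2 * 100 - 2                 # 262 after conv1_1 (k=3, pad=100)
    pools = []
    for _ in range(5):                  # ceil_mode=True max pools
        s = math.ceil(s / 2)
        pools.append(s)                 # 131, 66, 33, 17, 9
    fc6 = pools[-1] - 6                 # 3 (k=7, no padding)
    up2 = (fc6 - 1) * 2 + 4             # 8   -> pool4 (17) cropped at [5:5+8]
    up4 = (up2 - 1) * 2 + 4             # 18  -> pool3 (33) cropped at [9:9+18]
    up8 = (up4 - 1) * 8 + 16            # 152 -> output cropped at [31:31+h]
    return pools, fc6, up2, up4, up8

print(fcn8_sizes())  # ([131, 66, 33, 17, 9], 3, 8, 18, 152)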
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np from torch import nn import torch.utils.model_zoo as model_zoo assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 49 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 512 * x2 + 25088 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_11(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 256 * y3), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 1024 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 17572864 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4393216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 % 131 x2 = xindex // 8384 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 33536 * x2), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 33536 * x2), xmask) tmp3 = tl.load(in_ptr0 + (16768 + x0 + 128 * x1 + 33536 * x2), xmask) tmp5 = tl.load(in_ptr0 + (16832 + x0 + 128 * x1 + 33536 * x2), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, xmask) tl.store(out_ptr1 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8786432 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 8448 % 66 x1 = xindex // 128 % 66 x0 = xindex % 128 x3 = xindex // 557568 x6 = xindex tmp0 = 2 * x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 131, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = 2 * x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (x0 + 256 * x1 + 33536 * x2 + 2196608 * x3), tmp10, other=float('-inf')) tmp12 = 1 + 2 * x1 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 33536 * x2 + 2196608 * x3), tmp16, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + 2 * x2 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = 
tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + (16768 + x0 + 256 * x1 + 33536 * x2 + 2196608 * x3), tmp23, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + (16896 + x0 + 256 * x1 + 33536 * x2 + 2196608 * x3), tmp26, other=float('-inf')) tmp28 = triton_helpers.maximum(tmp27, tmp25) tmp29 = tmp17 > tmp11 tmp30 = tl.full([1], 1, tl.int8) tmp31 = tl.full([1], 0, tl.int8) tmp32 = tl.where(tmp29, tmp30, tmp31) tmp33 = tmp24 > tmp18 tmp34 = tl.full([1], 2, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp27 > tmp25 tmp37 = tl.full([1], 3, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tl.store(out_ptr0 + x6, tmp28, None) tl.store(out_ptr1 + x6, tmp38, None) @triton.jit def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_17(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1115136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 256 x1 = xindex // 256 % 33 x2 = xindex // 8448 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 33792 * x2), xmask) tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 33792 * x2), xmask) tmp3 = tl.load(in_ptr0 + (16896 + x0 + 512 * x1 + 33792 * x2), xmask) tmp5 = tl.load(in_ptr0 + (17152 + x0 + 512 * x1 + 33792 * x2), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, xmask) tl.store(out_ptr1 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_19(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 8704 % 17 x1 = xindex // 512 % 17 x0 = xindex % 512 x3 = xindex // 147968 x6 = xindex tmp0 = 2 * x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 33, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = 2 * x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 33792 * x2 + 557568 * x3), tmp10, other=float('-inf')) tmp12 = 1 + 2 * x1 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 
= tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 33792 * x2 + 557568 * x3), tmp16, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + 2 * x2 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + (16896 + x0 + 1024 * x1 + 33792 * x2 + 557568 * x3), tmp23, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + (17408 + x0 + 1024 * x1 + 33792 * x2 + 557568 * x3), tmp26, other=float('-inf')) tmp28 = triton_helpers.maximum(tmp27, tmp25) tmp29 = tmp17 > tmp11 tmp30 = tl.full([1], 1, tl.int8) tmp31 = tl.full([1], 0, tl.int8) tmp32 = tl.where(tmp29, tmp30, tmp31) tmp33 = tmp24 > tmp18 tmp34 = tl.full([1], 2, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp27 > tmp25 tmp37 = tl.full([1], 3, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tl.store(out_ptr0 + x6, tmp28, None) tl.store(out_ptr1 + x6, tmp38, None) @triton.jit def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_21(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 4608 % 9 x1 = xindex // 512 % 9 x0 = xindex % 512 x3 = xindex // 41472 x6 = xindex tmp0 = 2 * x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 17, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = 2 * x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 17408 * x2 + 147968 * x3), tmp10, other=float('-inf')) tmp12 = 1 + 2 * x1 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1 + 17408 * x2 + 147968 * x3), tmp16, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + 2 * x2 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp22 & tmp9 tmp24 = tl.load(in_ptr0 + (8704 + x0 + 1024 * x1 + 17408 * x2 + 147968 * x3), tmp23, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = tmp22 & tmp15 tmp27 = tl.load(in_ptr0 + (9216 + x0 + 1024 * x1 + 17408 * x2 + 147968 * x3), tmp26, other=float('-inf')) tmp28 = triton_helpers.maximum(tmp27, tmp25) tmp29 = tmp17 > tmp11 tmp30 = tl.full([1], 1, tl.int8) tmp31 = tl.full([1], 0, tl.int8) tmp32 = tl.where(tmp29, tmp30, tmp31) tmp33 = tmp24 > tmp18 tmp34 = tl.full([1], 2, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp27 > tmp25 tmp37 = tl.full([1], 3, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tl.store(out_ptr0 + x6, tmp28, None) tl.store(out_ptr1 + x6, tmp38, None) @triton.jit def triton_poi_fused_convolution_relu_22(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 4096 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 
= tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_23(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_add_24(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 32 % 8 x3 = xindex // 256 x4 = xindex % 32 x0 = xindex % 4 x5 = xindex tmp0 = tl.load(in_ptr0 + (360 + x4 + 68 * x2 + 1156 * x3), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr0 + x5, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x5, tmp4, xmask) @triton.jit def triton_poi_fused_add_25(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 5184 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 72 % 18 x3 = xindex // 1296 x4 = xindex % 72 x0 = xindex % 4 x5 = xindex tmp0 = tl.load(in_ptr0 + (1224 + x4 + 132 * x2 + 4356 * x3), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr0 + x5, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x5, tmp4, xmask) @triton.jit def triton_poi_fused_clone_26(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl .constexpr, XBLOCK: tl.constexpr): ynumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex % 64 x3 = xindex // 64 y0 = yindex % 4 y1 = yindex // 4 x5 = xindex y4 = yindex tmp0 = tl.load(in_ptr0 + (18972 + y0 + 4 * x2 + 608 * x3 + 92416 * y1), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x5 + 4096 * y4), tmp0, ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40) = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 
3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (512,), (1,)) assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_19, (512,), (1,)) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (512,), (1,)) assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512,), (1,)) assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (512,), (1,)) assert_size_stride(primals_28, (4096, 512, 7, 7), (25088, 49, 7, 1)) assert_size_stride(primals_29, (4096,), (1,)) assert_size_stride(primals_30, (4096, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_31, (4096,), (1,)) assert_size_stride(primals_32, (4, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_33, (4,), (1,)) assert_size_stride(primals_34, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_35, (4, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_36, (4,), (1,)) assert_size_stride(primals_37, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_38, (4, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_39, (4,), (1,)) assert_size_stride(primals_40, (4, 4, 16, 16), (1024, 256, 16, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) get_raw_stream(0) triton_poi_fused_0[grid(12, 4096)](primals_1, buf0, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32) triton_poi_fused_1[grid(192, 9)](primals_2, buf1, 192, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch. 
float32) triton_poi_fused_2[grid(4096, 9)](primals_4, buf2, 4096, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_4[grid(16384, 9)](primals_8, buf4, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_5[grid(32768, 9)](primals_10, buf5, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_6[grid(65536, 9)](primals_12, buf6, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_6[grid(65536, 9)](primals_14, buf7, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_7[grid(131072, 9)](primals_16, buf8, 131072, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_16 buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_18, buf9, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_18 buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_20, buf10, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_20 buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_22, buf11, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_22 buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_24, buf12, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_24 buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_8[grid(262144, 9)](primals_26, buf13, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_26 buf14 = empty_strided_cuda((4096, 512, 7, 7), (25088, 1, 3584, 512), torch.float32) triton_poi_fused_9[grid(2097152, 49)](primals_28, buf14, 2097152, 49, XBLOCK=32, YBLOCK=64, num_warps=8, num_stages=1) del primals_28 buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) triton_poi_fused_10[grid(16, 16)](primals_34, buf15, 16, 16, XBLOCK =16, YBLOCK=16, num_warps=4, num_stages=1) del primals_34 buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) triton_poi_fused_10[grid(16, 16)](primals_37, buf16, 16, 16, XBLOCK =16, YBLOCK=16, num_warps=4, num_stages=1) del primals_37 buf17 = empty_strided_cuda((4, 4, 16, 16), (1024, 1, 64, 4), torch. 
float32) triton_poi_fused_11[grid(16, 256)](primals_40, buf17, 16, 256, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_40 buf18 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(100, 100), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 64, 262, 262), (4393216, 1, 16768, 64)) buf19 = buf18 del buf18 triton_poi_fused_convolution_relu_12[grid(17572864)](buf19, primals_3, 17572864, XBLOCK=512, num_warps=8, num_stages=1) del primals_3 buf20 = extern_kernels.convolution(buf19, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 64, 262, 262), (4393216, 1, 16768, 64)) buf21 = buf20 del buf20 triton_poi_fused_convolution_relu_12[grid(17572864)](buf21, primals_5, 17572864, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf22 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64 ), torch.float32) buf23 = empty_strided_cuda((4, 64, 131, 131), (1098304, 1, 8384, 64 ), torch.int8) triton_poi_fused_max_pool2d_with_indices_13[grid(4393216)](buf21, buf22, buf23, 4393216, XBLOCK=512, num_warps=8, num_stages=1) buf24 = extern_kernels.convolution(buf22, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 128, 131, 131), (2196608, 1, 16768, 128)) buf25 = buf24 del buf24 triton_poi_fused_convolution_relu_14[grid(8786432)](buf25, primals_7, 8786432, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf26 = extern_kernels.convolution(buf25, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 128, 131, 131), (2196608, 1, 16768, 128)) buf27 = buf26 del buf26 triton_poi_fused_convolution_relu_14[grid(8786432)](buf27, primals_9, 8786432, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf28 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128), torch.float32) buf29 = empty_strided_cuda((4, 128, 66, 66), (557568, 1, 8448, 128), torch.int8) triton_poi_fused_max_pool2d_with_indices_15[grid(2230272)](buf27, buf28, buf29, 2230272, XBLOCK=512, num_warps=8, num_stages=1) buf30 = extern_kernels.convolution(buf28, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 256, 66, 66), (1115136, 1, 16896, 256)) buf31 = buf30 del buf30 triton_poi_fused_convolution_relu_16[grid(4460544)](buf31, primals_11, 4460544, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf32 = extern_kernels.convolution(buf31, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 256, 66, 66), (1115136, 1, 16896, 256)) buf33 = buf32 del buf32 triton_poi_fused_convolution_relu_16[grid(4460544)](buf33, primals_13, 4460544, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf34 = extern_kernels.convolution(buf33, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 256, 66, 66), (1115136, 1, 16896, 256)) buf35 = buf34 del buf34 triton_poi_fused_convolution_relu_16[grid(4460544)](buf35, primals_15, 4460544, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf36 = empty_strided_cuda((4, 256, 33, 33), (278784, 1, 8448, 256), torch.float32) buf37 = empty_strided_cuda((4, 
256, 33, 33), (278784, 1, 8448, 256), torch.int8) triton_poi_fused_max_pool2d_with_indices_17[grid(1115136)](buf35, buf36, buf37, 1115136, XBLOCK=512, num_warps=8, num_stages=1) buf38 = extern_kernels.convolution(buf36, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 512, 33, 33), (557568, 1, 16896, 512)) buf39 = buf38 del buf38 triton_poi_fused_convolution_relu_18[grid(2230272)](buf39, primals_17, 2230272, XBLOCK=512, num_warps=8, num_stages=1) del primals_17 buf40 = extern_kernels.convolution(buf39, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 512, 33, 33), (557568, 1, 16896, 512)) buf41 = buf40 del buf40 triton_poi_fused_convolution_relu_18[grid(2230272)](buf41, primals_19, 2230272, XBLOCK=512, num_warps=8, num_stages=1) del primals_19 buf42 = extern_kernels.convolution(buf41, buf10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 512, 33, 33), (557568, 1, 16896, 512)) buf43 = buf42 del buf42 triton_poi_fused_convolution_relu_18[grid(2230272)](buf43, primals_21, 2230272, XBLOCK=512, num_warps=8, num_stages=1) del primals_21 buf44 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512), torch.float32) buf45 = empty_strided_cuda((4, 512, 17, 17), (147968, 1, 8704, 512), torch.int8) triton_poi_fused_max_pool2d_with_indices_19[grid(591872)](buf43, buf44, buf45, 591872, XBLOCK=1024, num_warps=4, num_stages=1) buf46 = extern_kernels.convolution(buf44, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf46, (4, 512, 17, 17), (147968, 1, 8704, 512)) buf47 = buf46 del buf46 triton_poi_fused_convolution_relu_20[grid(591872)](buf47, primals_23, 591872, XBLOCK=1024, num_warps=4, num_stages=1) del primals_23 buf48 = extern_kernels.convolution(buf47, buf12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 512, 17, 17), (147968, 1, 8704, 512)) buf49 = buf48 del buf48 triton_poi_fused_convolution_relu_20[grid(591872)](buf49, primals_25, 591872, XBLOCK=1024, num_warps=4, num_stages=1) del primals_25 buf50 = extern_kernels.convolution(buf49, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf50, (4, 512, 17, 17), (147968, 1, 8704, 512)) buf51 = buf50 del buf50 triton_poi_fused_convolution_relu_20[grid(591872)](buf51, primals_27, 591872, XBLOCK=1024, num_warps=4, num_stages=1) del primals_27 buf52 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512), torch.float32) buf53 = empty_strided_cuda((4, 512, 9, 9), (41472, 1, 4608, 512), torch.int8) triton_poi_fused_max_pool2d_with_indices_21[grid(165888)](buf51, buf52, buf53, 165888, XBLOCK=512, num_warps=8, num_stages=1) buf54 = extern_kernels.convolution(buf52, buf14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf54, (4, 4096, 3, 3), (36864, 1, 12288, 4096)) buf55 = buf54 del buf54 triton_poi_fused_convolution_relu_22[grid(147456)](buf55, primals_29, 147456, XBLOCK=512, num_warps=8, num_stages=1) del primals_29 buf56 = extern_kernels.convolution(buf55, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf56, (4, 4096, 3, 3), (36864, 1, 12288, 4096)) buf57 = buf56 del buf56 triton_poi_fused_convolution_relu_22[grid(147456)](buf57, primals_31, 147456, XBLOCK=512, num_warps=8, num_stages=1) del primals_31 buf58 = extern_kernels.convolution(buf57, primals_32, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf58, (4, 4, 3, 3), (36, 1, 12, 4)) buf59 = buf58 del buf58 triton_poi_fused_convolution_23[grid(144)](buf59, primals_33, 144, XBLOCK=128, num_warps=4, num_stages=1) del primals_33 buf60 = extern_kernels.convolution(buf59, buf15, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf60, (4, 4, 8, 8), (256, 1, 32, 4)) buf61 = extern_kernels.convolution(buf44, primals_35, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf61, (4, 4, 17, 17), (1156, 1, 68, 4)) buf62 = buf60 del buf60 triton_poi_fused_add_24[grid(1024)](buf62, buf61, primals_36, 1024, XBLOCK=128, num_warps=4, num_stages=1) del buf61 del primals_36 buf63 = extern_kernels.convolution(buf62, buf16, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf63, (4, 4, 18, 18), (1296, 1, 72, 4)) buf64 = extern_kernels.convolution(buf36, primals_38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf64, (4, 4, 33, 33), (4356, 1, 132, 4)) buf65 = buf63 del buf63 triton_poi_fused_add_25[grid(5184)](buf65, buf64, primals_39, 5184, XBLOCK=128, num_warps=4, num_stages=1) del buf64 del primals_39 buf66 = extern_kernels.convolution(buf65, buf17, stride=(8, 8), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf66, (4, 4, 152, 152), (92416, 1, 608, 4)) buf67 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32) triton_poi_fused_clone_26[grid(16, 4096)](buf66, buf67, 16, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del buf66 return (buf67, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, buf12, buf13, buf14, primals_30, primals_32, buf15, primals_35, buf16, primals_38, buf17, buf19, buf21, buf22, buf23, buf25, buf27, buf28, buf29, buf31, buf33, buf35, buf36, buf37, buf39, buf41, buf43, buf44, buf45, buf47, buf49, buf51, buf52, buf53, buf55, buf57, buf59, buf62, buf65) def conv3x3(in_planes, out_planes, stride=1, padding=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=(3, 3), stride=( stride, stride), padding=(padding, padding)) def get_upsampling_weight(in_channels, out_channels, kernel_size): """Make a 2D bilinear kernel suitable for upsampling""" factor = (kernel_size + 1) // 2 if kernel_size % 2 == 1: center = factor - 1 else: center = factor - 0.5 og = np.ogrid[:kernel_size, :kernel_size] filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor) weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype=np.float64) weight[range(in_channels), range(out_channels), :, :] = filt return torch.from_numpy(weight).float() class FCN8VGG16New(nn.Module): def __init__(self, n_classes): super().__init__() self.n_classes = n_classes self.pool = nn.MaxPool2d(kernel_size=2, stride=2, 
ceil_mode=True) self.relu = nn.ReLU(inplace=True) self.conv1_1 = conv3x3(3, 64, stride=1, padding=100) self.conv1_2 = conv3x3(64, 64) self.conv2_1 = conv3x3(64, 128) self.conv2_2 = conv3x3(128, 128) self.conv3_1 = conv3x3(128, 256) self.conv3_2 = conv3x3(256, 256) self.conv3_3 = conv3x3(256, 256) self.conv4_1 = conv3x3(256, 512) self.conv4_2 = conv3x3(512, 512) self.conv4_3 = conv3x3(512, 512) self.conv5_1 = conv3x3(512, 512) self.conv5_2 = conv3x3(512, 512) self.conv5_3 = conv3x3(512, 512) self.fc6 = nn.Conv2d(512, 4096, kernel_size=7, stride=1, padding=0) self.dropout_f6 = nn.Dropout() self.fc7 = nn.Conv2d(4096, 4096, kernel_size=1, stride=1, padding=0) self.dropout_f7 = nn.Dropout() self.scoring_layer = nn.Conv2d(4096, self.n_classes, kernel_size=1, stride=1, padding=0) self.upscore2 = nn.ConvTranspose2d(self.n_classes, self.n_classes, kernel_size=4, stride=2, bias=False) self.upscore_pool4 = nn.ConvTranspose2d(self.n_classes, self. n_classes, kernel_size=4, stride=2, bias=False) self.upscore8 = nn.ConvTranspose2d(self.n_classes, self.n_classes, kernel_size=16, stride=8, bias=False) self.scoring_layer.weight.data.zero_() self.scoring_layer.bias.data.zero_() self.score_pool3 = nn.Conv2d(256, self.n_classes, kernel_size=1) self.score_pool4 = nn.Conv2d(512, self.n_classes, kernel_size=1) self.score_pool3.weight.data.zero_() self.score_pool3.bias.data.zero_() self.score_pool4.weight.data.zero_() self.score_pool4.bias.data.zero_() self.upscore2.weight.data.copy_(get_upsampling_weight(self. n_classes, self.n_classes, 4)) self.upscore_pool4.weight.data.copy_(get_upsampling_weight(self. n_classes, self.n_classes, 4)) self.upscore8.weight.data.copy_(get_upsampling_weight(self. n_classes, self.n_classes, 16)) pth_url = 'https://download.pytorch.org/models/vgg16-397923af.pth' state_dict = model_zoo.load_url(pth_url) layer_names = [layer_name for layer_name in state_dict] counter = 0 for p in self.parameters(): if counter < 26: p.data = state_dict[layer_names[counter]] elif counter == 26: p.data = state_dict[layer_names[counter]].view(4096, 512, 7, 7) elif counter == 27: p.data = state_dict[layer_names[counter]] elif counter == 28: p.data = state_dict[layer_names[counter]].view(4096, 4096, 1, 1 ) elif counter == 29: p.data = state_dict[layer_names[counter]] counter += 1 def forward(self, input_0): primals_2 = self.conv1_1.weight primals_3 = self.conv1_1.bias primals_4 = self.conv1_2.weight primals_5 = self.conv1_2.bias primals_6 = self.conv2_1.weight primals_7 = self.conv2_1.bias primals_8 = self.conv2_2.weight primals_9 = self.conv2_2.bias primals_10 = self.conv3_1.weight primals_11 = self.conv3_1.bias primals_12 = self.conv3_2.weight primals_13 = self.conv3_2.bias primals_14 = self.conv3_3.weight primals_15 = self.conv3_3.bias primals_16 = self.conv4_1.weight primals_17 = self.conv4_1.bias primals_18 = self.conv4_2.weight primals_19 = self.conv4_2.bias primals_20 = self.conv4_3.weight primals_21 = self.conv4_3.bias primals_22 = self.conv5_1.weight primals_23 = self.conv5_1.bias primals_24 = self.conv5_2.weight primals_25 = self.conv5_2.bias primals_26 = self.conv5_3.weight primals_27 = self.conv5_3.bias primals_28 = self.fc6.weight primals_29 = self.fc6.bias primals_30 = self.fc7.weight primals_31 = self.fc7.bias primals_32 = self.scoring_layer.weight primals_33 = self.scoring_layer.bias primals_34 = self.upscore2.weight primals_37 = self.upscore_pool4.weight primals_40 = self.upscore8.weight primals_38 = self.score_pool3.weight primals_36 = self.score_pool3.bias primals_35 = 
self.score_pool4.weight primals_39 = self.score_pool4.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40]) return output[0]
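A hypothetical equivalence check between the eager and compiled modules (assumptions: a CUDA device, network access for the VGG16 download both constructors trigger, both class definitions in scope, and eval() so the dropout layers are identities; the compiled graph contains no dropout kernels). Note that FCN8VGG16New appears to swap the two score-pool biases (primals_36 feeds the pool4 branch but is assigned score_pool3.bias), which is benign here only because both biases are zero-initialized:

model_ref = FCN8VGG16(n_classes=4).cuda().eval()
model_new = FCN8VGG16New(n_classes=4).cuda().eval()
model_new.load_state_dict(model_ref.state_dict())
x = torch.rand(4, 3, 64, 64, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(model_new(x), model_ref(x), rtol=1e-3, atol=1e-3)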
alzayats/DeepFish
FCN8VGG16
false
15065
[ "MIT" ]
48
4d9ebfb0474a7e9346c72e2a5411ab6f72e878e2
https://github.com/alzayats/DeepFish/tree/4d9ebfb0474a7e9346c72e2a5411ab6f72e878e2
Softmax
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ch/cchw4r62r5bnmw7dkm2r4jxdawjyon644tohbofuekisgaimhe7b.py # Topologically Sorted Source Nodes: [mul, log_gaussian_mean, log_gaussian_mean_1, sum_1, constant], Original ATen: [aten.mul, aten.add, aten.exp, aten.sum] # Source node to ATen node mapping: # constant => add_1 # log_gaussian_mean => add # log_gaussian_mean_1 => exp # mul => mul # sum_1 => sum_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.5), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %mul), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%add,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1]), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-05), kwargs = {}) triton_poi_fused_add_exp_mul_sum_0 = async_compile.triton('triton_poi_fused_add_exp_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': 
False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_exp_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask) tmp6 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp7 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask) tmp12 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp13 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask) tmp18 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp19 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp5 = tl_math.exp(tmp4) tmp8 = tmp7 * tmp2 tmp9 = tmp6 + tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tmp5 + tmp10 tmp14 = tmp13 * tmp2 tmp15 = tmp12 + tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp11 + tmp16 tmp20 = tmp19 * tmp2 tmp21 = tmp18 + tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp17 + tmp22 tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tl.store(out_ptr0 + (x2), tmp25, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7v/c7vyow4rdwzqjpwa5sb57m2yvdgs2p724s63sfflruxo2ubujcld.py # Topologically Sorted Source Nodes: [mul, log_gaussian_mean, log_gaussian_mean_1, outputs_mean, log_gaussian_variance, log_gaussian_variance_1, exp_2, sub, log_gaussian_variance_2, pow_1, outputs_variance], Original ATen: [aten.mul, aten.add, aten.exp, aten.div, aten.sub, aten.pow] # Source node to ATen node mapping: # exp_2 => exp_2 # log_gaussian_mean => add # log_gaussian_mean_1 => exp # log_gaussian_variance => mul_1 # log_gaussian_variance_1 => exp_1 # log_gaussian_variance_2 => mul_2 # mul => mul # outputs_mean => div # outputs_variance => div_1 # pow_1 => pow_1 # sub => sub # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.5), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %mul), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%add,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %unsqueeze), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 2), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_1,), kwargs = {}) # %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%exp_2, 1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_1, %sub), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%unsqueeze, 2), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_2, %pow_1), kwargs = {}) triton_poi_fused_add_div_exp_mul_pow_sub_1 = async_compile.triton('triton_poi_fused_add_div_exp_mul_pow_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_exp_mul_pow_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_exp_mul_pow_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x3), xmask) tmp6 = tl.load(in_ptr2 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tmp8 = 2.0 tmp9 = tmp4 * tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tl_math.exp(tmp1) tmp12 = 1.0 tmp13 = tmp11 - tmp12 tmp14 = tmp10 * tmp13 tmp15 = tmp6 * tmp6 tmp16 = tmp14 / tmp15 tl.store(out_ptr0 + (x3), tmp7, xmask) tl.store(out_ptr1 + (x3), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, log_gaussian_mean, log_gaussian_mean_1, sum_1, constant], Original ATen: [aten.mul, aten.add, aten.exp, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_add_exp_mul_sum_0.run(arg1_1, arg0_1, buf0, 64, grid=grid(64), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, log_gaussian_mean, log_gaussian_mean_1, outputs_mean, log_gaussian_variance, log_gaussian_variance_1, exp_2, sub, log_gaussian_variance_2, pow_1, outputs_variance], Original ATen: [aten.mul, aten.add, aten.exp, aten.div, aten.sub, aten.pow] triton_poi_fused_add_div_exp_mul_pow_sub_1.run(arg1_1, arg0_1, buf0, buf1, buf2, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 del buf0 return (buf1, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', 
dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn def keep_variance_fn(x): return x + 0.001 class Softmax(nn.Module): def __init__(self, dim=1, keep_variance_fn=None): super(Softmax, self).__init__() self.dim = dim self._keep_variance_fn = keep_variance_fn def forward(self, features_mean, features_variance, eps=1e-05): """Softmax function applied to a multivariate Gaussian distribution. It works under the assumption that features_mean and features_variance are the parameters of the independent gaussians that contribute to the multivariate gaussian. Mean and variance of the log-normal distribution are computed following https://en.wikipedia.org/wiki/Log-normal_distribution.""" log_gaussian_mean = features_mean + 0.5 * features_variance log_gaussian_variance = 2 * log_gaussian_mean log_gaussian_mean = torch.exp(log_gaussian_mean) log_gaussian_variance = torch.exp(log_gaussian_variance) log_gaussian_variance = log_gaussian_variance * (torch.exp( features_variance) - 1) constant = torch.sum(log_gaussian_mean, dim=self.dim) + eps constant = constant.unsqueeze(self.dim) outputs_mean = log_gaussian_mean / constant outputs_variance = log_gaussian_variance / constant ** 2 if self._keep_variance_fn is not None: outputs_variance = self._keep_variance_fn(outputs_variance) return outputs_mean, outputs_variance def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
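A minimal usage sketch (illustrative, not from the source repo; it assumes the Softmax class and keep_variance_fn from the snippet above are in scope):

import torch

softmax = Softmax(dim=1, keep_variance_fn=keep_variance_fn)
features_mean = torch.rand(4, 4, 4, 4)
features_variance = torch.rand(4, 4, 4, 4)
out_mean, out_var = softmax(features_mean, features_variance)
# Moment-matched means still behave like softmax outputs: positive and
# summing to ~1 along `dim` (up to the eps added to the normalizer).
print(out_mean.sum(dim=1)[0, 0])
# keep_variance_fn adds a 0.001 floor, so every output variance is >= 0.001.
print(bool((out_var >= 0.001).all()))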
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_exp_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp7 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp12 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp13 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp18 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp19 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp5 = tl_math.exp(tmp4) tmp8 = tmp7 * tmp2 tmp9 = tmp6 + tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tmp5 + tmp10 tmp14 = tmp13 * tmp2 tmp15 = tmp12 + tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp11 + tmp16 tmp20 = tmp19 * tmp2 tmp21 = tmp18 + tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp17 + tmp22 tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tl.store(out_ptr0 + x2, tmp25, xmask) @triton.jit def triton_poi_fused_add_div_exp_mul_pow_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x3, xmask) tmp6 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tmp8 = 2.0 tmp9 = tmp4 * tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tl_math.exp(tmp1) tmp12 = 1.0 tmp13 = tmp11 - tmp12 tmp14 = tmp10 * tmp13 tmp15 = tmp6 * tmp6 tmp16 = tmp14 / tmp15 tl.store(out_ptr0 + x3, tmp7, xmask) tl.store(out_ptr1 + x3, tmp16, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_exp_mul_sum_0[grid(64)](arg1_1, arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_exp_mul_pow_sub_1[grid(256)](arg1_1, arg0_1, buf0, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 del buf0 return buf1, buf2 def keep_variance_fn(x): return x + 0.001 class SoftmaxNew(nn.Module): def __init__(self, dim=1, keep_variance_fn=None): super(SoftmaxNew, self).__init__() self.dim = dim self._keep_variance_fn = keep_variance_fn def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
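A hedged smoke test for the Triton-backed wrapper above (assumes a CUDA device; the shapes follow get_inputs()):

import torch

if torch.cuda.is_available():
    m = SoftmaxNew(dim=1)
    out_mean, out_var = m(torch.rand(4, 4, 4, 4, device='cuda'),
                          torch.rand(4, 4, 4, 4, device='cuda'))
    # Both fused kernels write (4, 4, 4, 4) buffers; the (4, 4, 4) sum over
    # `dim` is an intermediate that `call` frees before returning.
    assert out_mean.shape == out_var.shape == (4, 4, 4, 4)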
collector-m/LiDAR-MOS
Softmax
false
15066
[ "MIT" ]
268
7ccbb63b4ee7c40195b35dd0dddd71473fae25b1
https://github.com/collector-m/LiDAR-MOS/tree/7ccbb63b4ee7c40195b35dd0dddd71473fae25b1
lovasz_hinge
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/qm/cqmiebj3omt27mbkqpnnkw5z77l3s4xgfv6gqogfvmadhw2rd3m7.py # Topologically Sorted Source Nodes: [valid], Original ATen: [aten.ne] # Source node to ATen node mapping: # valid => ne # Graph fragment: # %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%view_1, 255), kwargs = {}) triton_poi_fused_ne_0 = async_compile.triton('triton_poi_fused_ne_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_ne_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_ne_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 255.0 tmp2 = tmp0 != tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 
4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((256, ), (1, ), torch.bool) # Topologically Sorted Source Nodes: [valid], Original ATen: [aten.ne] stream0 = get_raw_stream(0) triton_poi_fused_ne_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) return (reinterpret_tensor(arg0_1, (256, ), (1, ), 0), buf0, reinterpret_tensor(arg1_1, (256, ), (1, ), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.parallel import torch.utils.data from torchvision.transforms import functional as F import torch.nn.functional as F from torch.autograd import Variable from itertools import filterfalse as ifilterfalse def flatten_binary_scores(scores, labels, ignore=255): """ Flattens predictions in the batch (binary case) Remove labels equal to 'ignore' """ scores = scores.view(-1) labels = labels.view(-1) if ignore is None: return scores, labels valid = labels != ignore vscores = scores[valid] vlabels = labels[valid] return vscores, vlabels def lovasz_grad(gt_sorted): """ Computes gradient of the Lovasz extension w.r.t sorted errors See Alg. 1 in paper """ p = len(gt_sorted) gts = gt_sorted.sum() intersection = gts.float() - gt_sorted.float().cumsum(0) union = gts.float() + (1 - gt_sorted).float().cumsum(0) jaccard = 1.0 - intersection / union if p > 1: jaccard[1:p] = jaccard[1:p] - jaccard[0:-1] return jaccard def isnan(x): return x != x def mean(l, ignore_nan=False, empty=0): """ nanmean compatible with generators. """ l = iter(l) if ignore_nan: l = ifilterfalse(isnan, l) try: n = 1 acc = next(l) except StopIteration: if empty == 'raise': raise ValueError('Empty mean') return empty for n, v in enumerate(l, 2): acc += v if n == 1: return acc return acc / n class lovasz_hinge(torch.nn.Module): def __init__(self, per_img=False, ignore=255): """ :param weight: 1D weight vector to deal with the class-imbalance """ super().__init__() self.per_image = per_img self.ignore = ignore def lovasz_hinge_flat(self, logits, labels): """ Binary Lovasz hinge loss logits: [P] Variable, logits at each prediction (between -\\infty and +\\infty) labels: [P] Tensor, binary ground truth labels (0 or 1) ignore: label to ignore """ if len(labels) == 0: return logits.sum() * 0.0 signs = 2.0 * labels.float() - 1.0 errors = 1.0 - logits * Variable(signs) errors_sorted, perm = torch.sort(errors, dim=0, descending=True) perm = perm.data gt_sorted = labels[perm] grad = lovasz_grad(gt_sorted) loss = torch.dot(F.relu(errors_sorted), Variable(grad)) return loss def forward(self, logits, labels): """ Binary Lovasz hinge loss logits: [B, H, W] Variable, logits at each pixel (between -\\infty and +\\infty) labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) per_image: compute the loss per image instead of per batch ignore: void class id """ if self.per_image: loss = mean(self.lovasz_hinge_flat(*flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), self.ignore)) for log, lab in zip(logits, labels)) else: loss = self.lovasz_hinge_flat(*flatten_binary_scores(logits, labels, self.ignore)) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
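A small worked example for lovasz_grad (illustrative; assumes the function above is in scope). The cumulative-Jaccard differences telescope, so the gradient entries sum to the Jaccard value of the full sorted vector:

import torch

gt_sorted = torch.tensor([1.0, 1.0, 0.0, 1.0])  # labels sorted by error, descending
grad = lovasz_grad(gt_sorted)
print(grad)        # tensor([0.3333, 0.3333, 0.0833, 0.2500])
print(grad.sum())  # tensor(1.), the telescoped final Jaccard value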
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.parallel import torch.utils.data from torchvision.transforms import functional as F import torch.nn.functional as F from torch.autograd import Variable from itertools import filterfalse as ifilterfalse assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_ne_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 255.0 tmp2 = tmp0 != tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((256,), (1,), torch.bool) get_raw_stream(0) triton_poi_fused_ne_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) return reinterpret_tensor(arg0_1, (256,), (1,), 0 ), buf0, reinterpret_tensor(arg1_1, (256,), (1,), 0) def flatten_binary_scores(scores, labels, ignore=255): """ Flattens predictions in the batch (binary case) Remove labels equal to 'ignore' """ scores = scores.view(-1) labels = labels.view(-1) if ignore is None: return scores, labels valid = labels != ignore vscores = scores[valid] vlabels = labels[valid] return vscores, vlabels def lovasz_grad(gt_sorted): """ Computes gradient of the Lovasz extension w.r.t sorted errors See Alg. 1 in paper """ p = len(gt_sorted) gts = gt_sorted.sum() intersection = gts.float() - gt_sorted.float().cumsum(0) union = gts.float() + (1 - gt_sorted).float().cumsum(0) jaccard = 1.0 - intersection / union if p > 1: jaccard[1:p] = jaccard[1:p] - jaccard[0:-1] return jaccard def isnan(x): return x != x def mean(l, ignore_nan=False, empty=0): """ nanmean compatible with generators. """ l = iter(l) if ignore_nan: l = ifilterfalse(isnan, l) try: n = 1 acc = next(l) except StopIteration: if empty == 'raise': raise ValueError('Empty mean') return empty for n, v in enumerate(l, 2): acc += v if n == 1: return acc return acc / n class lovasz_hingeNew(torch.nn.Module): def __init__(self, per_img=False, ignore=255): """ :param weight: 1D weight vector to deal with the class-imbalance """ super().__init__() self.per_image = per_img self.ignore = ignore def lovasz_hinge_flat(self, logits, labels): """ Binary Lovasz hinge loss logits: [P] Variable, logits at each prediction (between -\\infty and +\\infty) labels: [P] Tensor, binary ground truth labels (0 or 1) ignore: label to ignore """ if len(labels) == 0: return logits.sum() * 0.0 signs = 2.0 * labels.float() - 1.0 errors = 1.0 - logits * Variable(signs) errors_sorted, perm = torch.sort(errors, dim=0, descending=True) perm = perm.data gt_sorted = labels[perm] grad = lovasz_grad(gt_sorted) loss = torch.dot(F.relu(errors_sorted), Variable(grad)) return loss def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
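Note that the compiled call above covers only the flatten plus the labels != 255 mask; the sort, cumulative sums, and dot product of the loss remain in eager PyTorch. A hedged sketch of what this call returns (assumes CUDA; call empties the argument list, so pass a fresh one):

import torch

if torch.cuda.is_available():
    logits = torch.rand(4, 4, 4, 4, device='cuda')
    labels = torch.rand(4, 4, 4, 4, device='cuda')
    flat_logits, valid, flat_labels = call([logits, labels])
    assert flat_logits.shape == valid.shape == flat_labels.shape == (256,)
    assert bool(valid.all())  # random labels in [0, 1) never equal 255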
clovaai/ext_portrait_segmentation
lovasz_hinge
false
15067
[ "MIT" ]
227
9bc1bada1cb7bd17a3a80a2964980f4b4befef5b
https://github.com/clovaai/ext_portrait_segmentation/tree/9bc1bada1cb7bd17a3a80a2964980f4b4befef5b
Linear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/z3/cz3vnukh6pe6vae6yku3d5i2zk5dmliz7g2djpbn4duarwog4vtd.py # Topologically Sorted Source Nodes: [pow_1], Original ATen: [aten.pow] # Source node to ATen node mapping: # pow_1 => pow_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) triton_poi_fused_pow_0 = async_compile.triton('triton_poi_fused_pow_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 * tmp0 tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args 
args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [outputs_mean], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1], Original ATen: [aten.pow] stream0 = get_raw_stream(0) triton_poi_fused_pow_0.run(primals_1, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [outputs_variance], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) del buf1 return (reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.parameter import Parameter def keep_variance_fn(x): return x + 0.001 class Linear(nn.Module): def __init__(self, in_features, out_features, bias=True, keep_variance_fn=None): super(Linear, self).__init__() self._keep_variance_fn = keep_variance_fn self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.Tensor(out_features, in_features)) if bias: self.bias = Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) def forward(self, inputs_mean, inputs_variance): outputs_mean = F.linear(inputs_mean, self.weight, self.bias) outputs_variance = F.linear(inputs_variance, self.weight ** 2, None) if self._keep_variance_fn is not None: outputs_variance = self._keep_variance_fn(outputs_variance) return outputs_mean, outputs_variance def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
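A hedged usage sketch: the variance path is F.linear(inputs_variance, weight ** 2), i.e. inputs_variance @ (W ** 2).T under an independence assumption. The Parameters above are allocated uninitialized, so real use must fill them first; the init below is illustrative:

import torch

lin = Linear(4, 4)  # weight/bias are uninitialized memory at this point
torch.nn.init.xavier_uniform_(lin.weight)
torch.nn.init.zeros_(lin.bias)
mean = torch.rand(4, 4, 4, 4)
var = torch.rand(4, 4, 4, 4)
out_mean, out_var = lin(mean, var)
# The propagated variance is a plain matmul against the squared weights.
torch.testing.assert_close(out_var, var @ (lin.weight ** 2).t())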
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_0[grid(16)](primals_1, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2) del buf1 return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0) def keep_variance_fn(x): return x + 0.001 class LinearNew(nn.Module): def __init__(self, in_features, out_features, bias=True, keep_variance_fn=None): super(LinearNew, self).__init__() self._keep_variance_fn = keep_variance_fn self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.Tensor(out_features, in_features)) if bias: self.bias = Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) def forward(self, input_0, input_1): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0], output[1]
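A hedged CUDA equivalence sketch (assumes the eager Linear from the previous snippet and LinearNew live in one module; both allocate uninitialized Parameters, so the weights are initialized once and copied):

import torch

if torch.cuda.is_available():
    ref = Linear(4, 4).cuda()
    torch.nn.init.xavier_uniform_(ref.weight)
    torch.nn.init.zeros_(ref.bias)
    new = LinearNew(4, 4).cuda()
    with torch.no_grad():
        new.weight.copy_(ref.weight)
        new.bias.copy_(ref.bias)
    mean = torch.rand(4, 4, 4, 4, device='cuda')
    var = torch.rand(4, 4, 4, 4, device='cuda')
    for r, n in zip(ref(mean, var), new(mean.clone(), var.clone())):
        torch.testing.assert_close(r, n, rtol=1e-4, atol=1e-5)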
collector-m/LiDAR-MOS
Linear
false
15068
[ "MIT" ]
268
7ccbb63b4ee7c40195b35dd0dddd71473fae25b1
https://github.com/collector-m/LiDAR-MOS/tree/7ccbb63b4ee7c40195b35dd0dddd71473fae25b1
Conv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/sr/csrhhqsexdcor6gq6tz4dawxblhadgekinzxxkt33uwojltligp6.py # Topologically Sorted Source Nodes: [outputs_mean], Original ATen: [aten.convolution] # Source node to ATen node mapping: # outputs_mean => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/2y/c2y2depfxa2z6x54ydtq4sztgglwdvphffk5xy2ad6meezomjm6h.py # Topologically Sorted Source Nodes: [pow_1], Original ATen: [aten.pow] # Source node to ATen node mapping: # pow_1 => pow_1 # Graph fragment: # %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) triton_poi_fused_pow_1 = async_compile.triton('triton_poi_fused_pow_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 * tmp0 tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [outputs_mean], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [outputs_mean], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 16, grid=grid(16), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1], Original ATen: [aten.pow] triton_poi_fused_pow_1.run(primals_1, buf2, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [outputs_variance], Original ATen: [aten.convolution] buf3 = 
extern_kernels.convolution(primals_4, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 1, 1), (4, 1, 1, 1)) return (buf1, buf3, primals_1, primals_3, primals_4, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F from torch.nn.modules.conv import _ConvNd from torch.nn.modules.utils import _pair def keep_variance_fn(x): return x + 0.001 class Conv2d(_ConvNd): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, keep_variance_fn=None, padding_mode='zeros'): self._keep_variance_fn = keep_variance_fn kernel_size = _pair(kernel_size) stride = _pair(stride) padding = _pair(padding) dilation = _pair(dilation) super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, False, _pair(0), groups, bias, padding_mode) def forward(self, inputs_mean, inputs_variance): outputs_mean = F.conv2d(inputs_mean, self.weight, self.bias, self. stride, self.padding, self.dilation, self.groups) outputs_variance = F.conv2d(inputs_variance, self.weight ** 2, None, self.stride, self.padding, self.dilation, self.groups) if self._keep_variance_fn is not None: outputs_variance = self._keep_variance_fn(outputs_variance) return outputs_mean, outputs_variance def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
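A minimal usage sketch (illustrative): _ConvNd.reset_parameters initializes the weights during __init__ here, and a 4x4 kernel over the 4x4 inputs from get_inputs() produces 1x1 output maps:

import torch

conv = Conv2d(4, 4, kernel_size=4)  # _ConvNd initializes weight and bias
mean = torch.rand(4, 4, 4, 4)
var = torch.rand(4, 4, 4, 4)
out_mean, out_var = conv(mean, var)
assert out_mean.shape == out_var.shape == (4, 4, 1, 1)
# Squared weights and non-negative input variances keep variances >= 0.
assert bool((out_var >= 0).all())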
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn.modules.conv import _ConvNd from torch.nn.modules.utils import _pair assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(16)](buf1, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_pow_1[grid(256)](primals_1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = extern_kernels.convolution(primals_4, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 1, 1), (4, 1, 1, 1)) return buf1, buf3, primals_1, primals_3, primals_4, buf2 def keep_variance_fn(x): return x + 0.001 class Conv2dNew(_ConvNd): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, keep_variance_fn=None, padding_mode='zeros'): self._keep_variance_fn = keep_variance_fn kernel_size = _pair(kernel_size) stride = _pair(stride) padding = _pair(padding) dilation = _pair(dilation) super(Conv2dNew, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, False, _pair(0), groups, bias, padding_mode) def forward(self, input_0, input_1): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0], output[1]
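A hedged CUDA sanity sketch: with copied weights, Conv2dNew should track the eager Conv2d from the previous snippet up to float tolerance (both classes assumed importable together):

import torch

if torch.cuda.is_available():
    ref = Conv2d(4, 4, kernel_size=4).cuda()
    new = Conv2dNew(4, 4, kernel_size=4).cuda()
    with torch.no_grad():
        new.weight.copy_(ref.weight)
        new.bias.copy_(ref.bias)
    mean = torch.rand(4, 4, 4, 4, device='cuda')
    var = torch.rand(4, 4, 4, 4, device='cuda')
    for r, n in zip(ref(mean, var), new(mean.clone(), var.clone())):
        torch.testing.assert_close(r, n, rtol=1e-4, atol=1e-5)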
collector-m/LiDAR-MOS
Conv2d
false
15069
[ "MIT" ]
268
7ccbb63b4ee7c40195b35dd0dddd71473fae25b1
https://github.com/collector-m/LiDAR-MOS/tree/7ccbb63b4ee7c40195b35dd0dddd71473fae25b1
MaxPool2d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/u4/cu46rjpw6oa6mworxhmsjuu2sie5fvmufgcgjaooowmjpalwpn2r.py # Topologically Sorted Source Nodes: [ab, add, stddev, alpha, sub_1, pow_1, neg, sub_2, wrapped_log_1, sub_3, pdf, mul_2, sub_4, mul, wrapped_sqrt_1, truediv_2, erf, add_1, cdf, mul_3, add_2, z_mu, add_4, mul_4, mul_5, pow_2, add_5, mul_6, add_6, pow_3, add_7, sub_5, mul_7, add_8, pow_4, z_var], Original ATen: [aten.sub, aten.add, aten.sqrt, aten.div, aten.pow, aten.neg, aten.log, aten.exp, aten.mul, aten.erf, aten.rsub] # Source node to ATen node mapping: # ab => sub # add => add # add_1 => add_1 # add_2 => add_2 # add_4 => add_4 # add_5 => add_5 # add_6 => add_6 # add_7 => add_7 # add_8 => add_8 # alpha => div # cdf => mul_1 # erf => erf # mul => mul # mul_2 => mul_2 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # mul_6 => mul_6 # mul_7 => mul_7 # neg => neg # pdf => exp # pow_1 => pow_1 # pow_2 => pow_2 # pow_3 => pow_3 # pow_4 => pow_4 # stddev => sqrt # sub_1 => sub_1 # sub_2 => div_1 # sub_3 => sub_3 # sub_4 => sub_4 # sub_5 => sub_5 # truediv_2 => div_2 # wrapped_log_1 => full_default_1 # wrapped_sqrt_1 => full_default_2 # z_mu => add_3 # z_var => sub_6 # Graph fragment: # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_4, %slice_8), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_12, %slice_16), kwargs = {}) # %sqrt : [num_users=3] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, 0.0), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%pow_1,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, 2.0), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.9189385332046727), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, 
pin_memory: False}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_1, %full_default_1), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sqrt, %exp), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, 0.0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, 1.0), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.414213562373095), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %full_default_2), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div_2,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {}) # %mul_1 : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.5), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %mul_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %slice_8), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_4, %slice_8), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_4, %sqrt), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %exp), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%slice_4, 2), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, %slice_12), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, %mul_1), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %mul_6), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%slice_8, 2), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_3, %slice_16), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mul_1), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, %sub_5), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %mul_7), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_3, 2), kwargs = {}) # %sub_6 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_8, %pow_4), kwargs = {}) triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_rsub_sqrt_sub_0 = async_compile.triton('triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_rsub_sqrt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as 
tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_rsub_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_rsub_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (2*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (2*x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (2*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = libdevice.sqrt(tmp5) tmp7 = tmp2 * tmp6 tmp8 = tmp0 - tmp1 tmp9 = tmp8 / tmp6 tmp10 = 0.0 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = -tmp12 tmp14 = 0.5 tmp15 = tmp13 * tmp14 tmp16 = 0.9189385332046727 tmp17 = tmp15 - tmp16 tmp18 = tl_math.exp(tmp17) tmp19 = tmp7 * tmp18 tmp20 = tmp0 * tmp0 tmp21 = tmp20 + tmp3 tmp22 = 1.0 tmp23 = tmp11 * tmp22 tmp24 = 1.414213562373095 tmp25 = tmp23 / tmp24 tmp26 = libdevice.erf(tmp25) tmp27 = tmp26 + tmp22 tmp28 = tmp27 * tmp14 tmp29 = tmp21 * tmp28 tmp30 = tmp19 + tmp29 tmp31 = tmp6 * tmp18 tmp32 = tmp8 * tmp28 tmp33 = tmp31 + tmp32 tmp34 = tmp33 + tmp1 tmp35 = tmp34 * tmp34 tmp36 = tmp1 * tmp1 tmp37 = tmp36 + tmp4 tmp38 = tmp22 - tmp28 tmp39 = tmp37 * tmp38 tmp40 = tmp30 + tmp39 tmp41 = tmp40 - tmp35 tl.store(in_out_ptr0 + (x0), tmp41, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/vi/cviiumjub3u57kibptax7jl6tgmdovps6rx7eaeb6mrngefnmjyp.py # Topologically Sorted Source Nodes: [add_13, add_9, stddev_1, mul_12, ab_1, alpha_1, sub_8, pow_5, neg_1, sub_9, wrapped_log_3, sub_10, pdf_1, mul_13, pow_6, add_14, sub_11, mul_8, wrapped_sqrt_3, truediv_5, erf_1, add_10, cdf_1, mul_14, add_15, pow_7, add_16, sub_12, mul_15, add_17, mul_10, mul_11, add_11, z_mu_1, pow_8, z_var_1], Original ATen: [aten.add, aten.sqrt, aten.mul, aten.sub, aten.div, aten.pow, aten.neg, aten.log, aten.exp, aten.erf, aten.rsub] # Source node to ATen node mapping: # ab_1 => sub_7 # add_10 => add_10 # add_11 => add_11 # add_13 => add_13 # add_14 => add_14 # add_15 => add_15 # add_16 => add_16 # add_17 => add_17 # add_9 => add_9 # alpha_1 => div_3 # cdf_1 => mul_9 # erf_1 => erf_1 # mul_10 => mul_10 # mul_11 => mul_11 # mul_12 => mul_12 # mul_13 => mul_13 # mul_14 => 
mul_14 # mul_15 => mul_15 # mul_8 => mul_8 # neg_1 => neg_1 # pdf_1 => exp_1 # pow_5 => pow_5 # pow_6 => pow_6 # pow_7 => pow_7 # pow_8 => pow_8 # stddev_1 => sqrt_3 # sub_10 => sub_10 # sub_11 => sub_11 # sub_12 => sub_12 # sub_8 => sub_8 # sub_9 => div_4 # truediv_5 => div_5 # wrapped_log_3 => full_default_4 # wrapped_sqrt_3 => full_default_5 # z_mu_1 => add_12 # z_var_1 => sub_13 # Graph fragment: # %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_19, %slice_23), kwargs = {}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_27, %slice_31), kwargs = {}) # %sqrt_3 : [num_users=3] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_9,), kwargs = {}) # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_13, %sqrt_3), kwargs = {}) # %sub_7 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_19, %slice_23), kwargs = {}) # %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_7, %sqrt_3), kwargs = {}) # %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_3, 0.0), kwargs = {}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_8, 2), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%pow_5,), kwargs = {}) # %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg_1, 2.0), kwargs = {}) # %full_default_4 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.9189385332046727), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False}) # %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_4, %full_default_4), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_10,), kwargs = {}) # %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_12, %exp_1), kwargs = {}) # %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%slice_19, 2), kwargs = {}) # %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_6, %slice_27), kwargs = {}) # %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_3, 0.0), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, 1.0), kwargs = {}) # %full_default_5 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.414213562373095), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False}) # %div_5 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_8, %full_default_5), kwargs = {}) # %erf_1 : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div_5,), kwargs = {}) # %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf_1, 1.0), kwargs = {}) # %mul_9 : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_10, 0.5), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_14, %mul_9), kwargs = {}) # %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_13, %mul_14), kwargs = {}) # %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%slice_23, 2), kwargs = 
{}) # %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_7, %slice_31), kwargs = {}) # %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mul_9), kwargs = {}) # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_16, %sub_12), kwargs = {}) # %add_17 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_15, %mul_15), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sqrt_3, %exp_1), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, %mul_9), kwargs = {}) # %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_10, %mul_11), kwargs = {}) # %add_12 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %slice_23), kwargs = {}) # %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_12, 2), kwargs = {}) # %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_17, %pow_8), kwargs = {}) triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_rsub_sqrt_sub_1 = async_compile.triton('triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_rsub_sqrt_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_rsub_sqrt_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_rsub_sqrt_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (5 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp30 = 
tl.load(in_ptr0 + (1 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + ((2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr1 + (1 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last') tmp54 = tl.load(in_ptr2 + (x0 + (4*x1)), xmask) tmp55 = tl.load(in_ptr2 + (2 + x0 + (4*x1)), xmask) tmp2 = tmp0 + tmp1 tmp3 = libdevice.sqrt(tmp2) tmp6 = tmp4 - tmp5 tmp7 = tmp6 / tmp3 tmp8 = 0.0 tmp9 = tmp7 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = -tmp10 tmp12 = 0.5 tmp13 = tmp11 * tmp12 tmp14 = 0.9189385332046727 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp3 * tmp16 tmp18 = 1.0 tmp19 = tmp9 * tmp18 tmp20 = 1.414213562373095 tmp21 = tmp19 / tmp20 tmp22 = libdevice.erf(tmp21) tmp23 = tmp22 + tmp18 tmp24 = tmp23 * tmp12 tmp25 = tmp6 * tmp24 tmp26 = tmp17 + tmp25 tmp27 = tmp26 + tmp5 tmp28 = tmp27 * tmp27 tmp31 = tmp29 + tmp30 tmp32 = libdevice.sqrt(tmp31) tmp35 = tmp33 - tmp34 tmp36 = tmp35 / tmp32 tmp37 = tmp36 - tmp8 tmp38 = tmp37 * tmp37 tmp39 = -tmp38 tmp40 = tmp39 * tmp12 tmp41 = tmp40 - tmp14 tmp42 = tl_math.exp(tmp41) tmp43 = tmp32 * tmp42 tmp44 = tmp37 * tmp18 tmp45 = tmp44 / tmp20 tmp46 = libdevice.erf(tmp45) tmp47 = tmp46 + tmp18 tmp48 = tmp47 * tmp12 tmp49 = tmp35 * tmp48 tmp50 = tmp43 + tmp49 tmp51 = tmp50 + tmp34 tmp52 = tmp51 + tmp27 tmp53 = tmp51 - tmp27 tmp56 = tmp54 + tmp55 tmp57 = libdevice.sqrt(tmp56) tmp58 = tmp53 / tmp57 tmp59 = tmp58 - tmp8 tmp60 = tmp59 * tmp59 tmp61 = -tmp60 tmp62 = tmp61 * tmp12 tmp63 = tmp62 - tmp14 tmp64 = tl_math.exp(tmp63) tmp65 = tmp57 * tmp64 tmp66 = tmp59 * tmp18 tmp67 = tmp66 / tmp20 tmp68 = libdevice.erf(tmp67) tmp69 = tmp68 + tmp18 tmp70 = tmp69 * tmp12 tmp71 = tmp53 * tmp70 tmp72 = tmp65 + tmp71 tmp73 = tmp72 + tmp27 tmp74 = tmp51 * tmp51 tmp75 = tmp52 * tmp57 tmp76 = tmp75 * tmp64 tmp77 = tmp74 + tmp54 tmp78 = tmp77 * tmp70 tmp79 = tmp76 + tmp78 tmp80 = tmp28 + tmp55 tmp81 = tmp18 - tmp70 tmp82 = tmp80 * tmp81 tmp83 = tmp79 + tmp82 tmp84 = tmp73 * tmp73 tmp85 = tmp83 - tmp84 tl.store(out_ptr2 + (x2), tmp73, xmask) tl.store(in_out_ptr0 + (x2), tmp85, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [ab, add, stddev, alpha, sub_1, pow_1, neg, sub_2, wrapped_log_1, sub_3, pdf, mul_2, sub_4, mul, wrapped_sqrt_1, truediv_2, erf, add_1, cdf, mul_3, add_2, z_mu, add_4, mul_4, mul_5, pow_2, add_5, mul_6, add_6, pow_3, add_7, sub_5, mul_7, add_8, pow_4, z_var], Original ATen: [aten.sub, aten.add, aten.sqrt, aten.div, aten.pow, aten.neg, aten.log, aten.exp, aten.mul, aten.erf, aten.rsub] stream0 = get_raw_stream(0) triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_rsub_sqrt_sub_0.run(buf3, arg0_1, arg1_1, 128, grid=grid(128), stream=stream0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf6 = buf0; del buf0 # reuse buf9 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [add_13, add_9, stddev_1, mul_12, ab_1, alpha_1, sub_8, pow_5, neg_1, sub_9, wrapped_log_3, sub_10, pdf_1, mul_13, pow_6, add_14, sub_11, mul_8, wrapped_sqrt_3, truediv_5, erf_1, add_10, cdf_1, mul_14, add_15, pow_7, add_16, sub_12, mul_15, 
add_17, mul_10, mul_11, add_11, z_mu_1, pow_8, z_var_1], Original ATen: [aten.add, aten.sqrt, aten.mul, aten.sub, aten.div, aten.pow, aten.neg, aten.log, aten.exp, aten.erf, aten.rsub] triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_rsub_sqrt_sub_1.run(buf9, arg1_1, arg0_1, buf3, buf8, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 del buf3 return (buf8, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
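For orientation, the compiled call() above can be driven directly; a minimal sketch, assuming a CUDA device and the call function above in scope (the variable names are illustrative). Per the MaxPool2dNew.forward wiring further below, the first argument is the mean tensor and the second the variance tensor, and call() clears the list it is handed:

import torch

inputs_mean = torch.rand(4, 4, 4, 4, device='cuda')      # per-activation means
inputs_variance = torch.rand(4, 4, 4, 4, device='cuda')  # per-activation variances
z_mu, z_var = call([inputs_mean, inputs_variance])
print(z_mu.shape, z_var.shape)  # both torch.Size([4, 4, 2, 2]) after the 1x2 then 2x1 pools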
import torch import numpy as np import torch.nn as nn from numbers import Number def keep_variance_fn(x): return x + 0.001 def normcdf(value, mu=0.0, stddev=1.0): sinv = 1.0 / stddev if isinstance(stddev, Number) else stddev.reciprocal() return 0.5 * (1.0 + torch.erf((value - mu) * sinv / np.sqrt(2.0))) def _normal_log_pdf(value, mu, stddev): var = stddev ** 2 log_scale = np.log(stddev) if isinstance(stddev, Number) else torch.log( stddev) return -(value - mu) ** 2 / (2.0 * var) - log_scale - np.log(np.sqrt( 2.0 * np.pi)) def normpdf(value, mu=0.0, stddev=1.0): return torch.exp(_normal_log_pdf(value, mu, stddev)) class MaxPool2d(nn.Module): def __init__(self, keep_variance_fn=None): super(MaxPool2d, self).__init__() self._keep_variance_fn = keep_variance_fn def _max_pool_internal(self, mu_a, mu_b, var_a, var_b): stddev = torch.sqrt(var_a + var_b) ab = mu_a - mu_b alpha = ab / stddev pdf = normpdf(alpha) cdf = normcdf(alpha) z_mu = stddev * pdf + ab * cdf + mu_b z_var = (mu_a + mu_b) * stddev * pdf + (mu_a ** 2 + var_a) * cdf + ( mu_b ** 2 + var_b) * (1.0 - cdf) - z_mu ** 2 if self._keep_variance_fn is not None: z_var = self._keep_variance_fn(z_var) return z_mu, z_var def _max_pool_1x2(self, inputs_mean, inputs_variance): mu_a = inputs_mean[:, :, :, 0::2] mu_b = inputs_mean[:, :, :, 1::2] var_a = inputs_variance[:, :, :, 0::2] var_b = inputs_variance[:, :, :, 1::2] outputs_mean, outputs_variance = self._max_pool_internal(mu_a, mu_b, var_a, var_b) return outputs_mean, outputs_variance def _max_pool_2x1(self, inputs_mean, inputs_variance): mu_a = inputs_mean[:, :, 0::2, :] mu_b = inputs_mean[:, :, 1::2, :] var_a = inputs_variance[:, :, 0::2, :] var_b = inputs_variance[:, :, 1::2, :] outputs_mean, outputs_variance = self._max_pool_internal(mu_a, mu_b, var_a, var_b) return outputs_mean, outputs_variance def forward(self, inputs_mean, inputs_variance): z_mean, z_variance = self._max_pool_1x2(inputs_mean, inputs_variance) outputs_mean, outputs_variance = self._max_pool_2x1(z_mean, z_variance) return outputs_mean, outputs_variance def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
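For reference, the moments computed in _max_pool_internal above transcribe the standard closed form for the maximum of two independent Gaussians (Clark's approximation, on our reading; the source does not name it). With $s = \sqrt{\mathrm{var}_a + \mathrm{var}_b}$, $\alpha = (\mu_a - \mu_b)/s$, and $\phi$, $\Phi$ the standard normal pdf and cdf (normpdf and normcdf above):

$z_\mu = s\,\phi(\alpha) + (\mu_a - \mu_b)\,\Phi(\alpha) + \mu_b$

$z_{\mathrm{var}} = (\mu_a + \mu_b)\,s\,\phi(\alpha) + (\mu_a^2 + \mathrm{var}_a)\,\Phi(\alpha) + (\mu_b^2 + \mathrm{var}_b)\,(1 - \Phi(\alpha)) - z_\mu^2$

The two fused Triton kernels below evaluate exactly these expressions, once for the 1x2 pool and once for the 2x1 pool; the constant 0.9189385332046727 they embed is $\log\sqrt{2\pi}$ from _normal_log_pdf.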
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn from numbers import Number assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_rsub_sqrt_sub_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 2 * x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 2 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = libdevice.sqrt(tmp5) tmp7 = tmp2 * tmp6 tmp8 = tmp0 - tmp1 tmp9 = tmp8 / tmp6 tmp10 = 0.0 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = -tmp12 tmp14 = 0.5 tmp15 = tmp13 * tmp14 tmp16 = 0.9189385332046727 tmp17 = tmp15 - tmp16 tmp18 = tl_math.exp(tmp17) tmp19 = tmp7 * tmp18 tmp20 = tmp0 * tmp0 tmp21 = tmp20 + tmp3 tmp22 = 1.0 tmp23 = tmp11 * tmp22 tmp24 = 1.414213562373095 tmp25 = tmp23 / tmp24 tmp26 = libdevice.erf(tmp25) tmp27 = tmp26 + tmp22 tmp28 = tmp27 * tmp14 tmp29 = tmp21 * tmp28 tmp30 = tmp19 + tmp29 tmp31 = tmp6 * tmp18 tmp32 = tmp8 * tmp28 tmp33 = tmp31 + tmp32 tmp34 = tmp33 + tmp1 tmp35 = tmp34 * tmp34 tmp36 = tmp1 * tmp1 tmp37 = tmp36 + tmp4 tmp38 = tmp22 - tmp28 tmp39 = tmp37 * tmp38 tmp40 = tmp30 + tmp39 tmp41 = tmp40 - tmp35 tl.store(in_out_ptr0 + x0, tmp41, xmask) @triton.jit def triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_rsub_sqrt_sub_1( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr2, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr1 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp30 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy ='evict_last') tmp33 = tl.load(in_ptr1 + (2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp34 = tl.load(in_ptr1 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy ='evict_last') tmp54 = tl.load(in_ptr2 + (x0 + 4 * x1), xmask) tmp55 = tl.load(in_ptr2 + (2 + x0 + 4 * x1), xmask) tmp2 = tmp0 + tmp1 tmp3 = libdevice.sqrt(tmp2) tmp6 = tmp4 - tmp5 tmp7 = tmp6 / tmp3 tmp8 = 0.0 tmp9 = tmp7 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = -tmp10 tmp12 = 0.5 tmp13 = tmp11 * tmp12 tmp14 = 0.9189385332046727 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp3 * tmp16 tmp18 = 1.0 tmp19 = tmp9 * tmp18 tmp20 = 1.414213562373095 tmp21 = tmp19 / tmp20 tmp22 = libdevice.erf(tmp21) tmp23 = tmp22 + tmp18 tmp24 = tmp23 * tmp12 tmp25 = tmp6 * tmp24 tmp26 = tmp17 + tmp25 tmp27 = tmp26 + tmp5 tmp28 = tmp27 * tmp27 tmp31 = tmp29 + tmp30 tmp32 = libdevice.sqrt(tmp31) tmp35 = tmp33 - tmp34 tmp36 = tmp35 / tmp32 tmp37 = tmp36 - tmp8 tmp38 = tmp37 * tmp37 tmp39 = -tmp38 tmp40 = tmp39 * tmp12 tmp41 = tmp40 - tmp14 tmp42 = tl_math.exp(tmp41) tmp43 = tmp32 * tmp42 tmp44 = tmp37 * tmp18 tmp45 = tmp44 / tmp20 tmp46 = libdevice.erf(tmp45) tmp47 = tmp46 + tmp18 tmp48 = tmp47 * tmp12 tmp49 = tmp35 * tmp48 tmp50 = tmp43 + tmp49 tmp51 = tmp50 + tmp34 tmp52 = tmp51 + tmp27 tmp53 = tmp51 - tmp27 tmp56 = tmp54 + tmp55 tmp57 = libdevice.sqrt(tmp56) tmp58 = tmp53 / tmp57 tmp59 = tmp58 - tmp8 tmp60 = tmp59 * tmp59 tmp61 = -tmp60 tmp62 = tmp61 * tmp12 tmp63 = tmp62 - tmp14 tmp64 = tl_math.exp(tmp63) tmp65 = tmp57 * tmp64 tmp66 = tmp59 * tmp18 tmp67 = tmp66 / tmp20 tmp68 = libdevice.erf(tmp67) tmp69 = tmp68 + tmp18 tmp70 = tmp69 * tmp12 tmp71 = tmp53 * tmp70 tmp72 = tmp65 + tmp71 tmp73 = tmp72 + tmp27 tmp74 = tmp51 * tmp51 tmp75 = tmp52 * tmp57 tmp76 = tmp75 * tmp64 tmp77 = tmp74 + tmp54 tmp78 = tmp77 * tmp70 tmp79 = tmp76 + tmp78 tmp80 = tmp28 + tmp55 tmp81 = tmp18 - tmp70 tmp82 = tmp80 * tmp81 tmp83 = tmp79 + tmp82 tmp84 = tmp73 * tmp73 tmp85 = tmp83 - tmp84 tl.store(out_ptr2 + x2, tmp73, xmask) tl.store(in_out_ptr0 + x2, tmp85, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) buf3 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_rsub_sqrt_sub_0[grid (128)](buf3, arg0_1, arg1_1, 128, XBLOCK=128, num_warps=4, num_stages=1) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf6 = buf0 del buf0 buf9 = buf6 del buf6 triton_poi_fused_add_div_erf_exp_log_mul_neg_pow_rsub_sqrt_sub_1[grid (64)](buf9, arg1_1, arg0_1, buf3, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 del buf3 return buf8, 
buf9 def keep_variance_fn(x): return x + 0.001 def normcdf(value, mu=0.0, stddev=1.0): sinv = 1.0 / stddev if isinstance(stddev, Number) else stddev.reciprocal() return 0.5 * (1.0 + torch.erf((value - mu) * sinv / np.sqrt(2.0))) def _normal_log_pdf(value, mu, stddev): var = stddev ** 2 log_scale = np.log(stddev) if isinstance(stddev, Number) else torch.log( stddev) return -(value - mu) ** 2 / (2.0 * var) - log_scale - np.log(np.sqrt( 2.0 * np.pi)) def normpdf(value, mu=0.0, stddev=1.0): return torch.exp(_normal_log_pdf(value, mu, stddev)) class MaxPool2dNew(nn.Module): def __init__(self, keep_variance_fn=None): super(MaxPool2dNew, self).__init__() self._keep_variance_fn = keep_variance_fn def _max_pool_internal(self, mu_a, mu_b, var_a, var_b): stddev = torch.sqrt(var_a + var_b) ab = mu_a - mu_b alpha = ab / stddev pdf = normpdf(alpha) cdf = normcdf(alpha) z_mu = stddev * pdf + ab * cdf + mu_b z_var = (mu_a + mu_b) * stddev * pdf + (mu_a ** 2 + var_a) * cdf + ( mu_b ** 2 + var_b) * (1.0 - cdf) - z_mu ** 2 if self._keep_variance_fn is not None: z_var = self._keep_variance_fn(z_var) return z_mu, z_var def _max_pool_1x2(self, inputs_mean, inputs_variance): mu_a = inputs_mean[:, :, :, 0::2] mu_b = inputs_mean[:, :, :, 1::2] var_a = inputs_variance[:, :, :, 0::2] var_b = inputs_variance[:, :, :, 1::2] outputs_mean, outputs_variance = self._max_pool_internal(mu_a, mu_b, var_a, var_b) return outputs_mean, outputs_variance def _max_pool_2x1(self, inputs_mean, inputs_variance): mu_a = inputs_mean[:, :, 0::2, :] mu_b = inputs_mean[:, :, 1::2, :] var_a = inputs_variance[:, :, 0::2, :] var_b = inputs_variance[:, :, 1::2, :] outputs_mean, outputs_variance = self._max_pool_internal(mu_a, mu_b, var_a, var_b) return outputs_mean, outputs_variance def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1]
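A quick parity sketch between the eager reference and the fused path, assuming both classes above are in scope and a CUDA device is available; the tolerance is illustrative, since the libdevice erf/exp used by the kernels need not match eager mode bit-for-bit:

import torch

ref = MaxPool2d()
fused = MaxPool2dNew()
mean = torch.rand(4, 4, 4, 4, device='cuda')
var = torch.rand(4, 4, 4, 4, device='cuda')
m_ref, v_ref = ref(mean, var)
m_fused, v_fused = fused(mean, var)
assert torch.allclose(m_ref, m_fused, atol=1e-5)
assert torch.allclose(v_ref, v_fused, atol=1e-5)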
collector-m/LiDAR-MOS
MaxPool2d
false
15070
[ "MIT" ]
268
7ccbb63b4ee7c40195b35dd0dddd71473fae25b1
https://github.com/collector-m/LiDAR-MOS/tree/7ccbb63b4ee7c40195b35dd0dddd71473fae25b1
TransformerEncoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/qw/cqw7yoyglmtjad3kirznl5odetqfs3k6pjtnfdbzklyhsdvuvgft.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] # Source node to ATen node mapping: # multi_head_attention_forward => mul # Graph fragment: # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 
tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/hz/chzi3aam26mikdhljz5x7jlqazm7kpktzeptsf36thgfhsg7ub6a.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] # Source node to ATen node mapping: # multi_head_attention_forward => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/em/cem6qbxwbiqnjqybzk5arf2obt5uggy4qs7otwwpovvnrhvdc6h4.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] # Source node to ATen node mapping: # multi_head_attention_forward => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = 
async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/rh/crhjfwyl6xoj5ylcsbbh6lp2vlegits2zkdej3b3wb2q4ddfnejv.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] # Source node to ATen node mapping: # multi_head_attention_forward => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask) tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7m/c7my77j7miwq7j5yz26lhwtp4fyb6qiw2vuvksvbnxxhdrtuljuq.py # Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # src => add # src_1 => var_mean # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_5, %squeeze), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [1]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') 
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/uy/cuyacfovgswdpyhlq2s2chxvljavfbdvz7wnuo2oaa6t6ewmxjgf.py # Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # src => add # src_1 => add_1, add_2, mul_1, mul_2, rsqrt, sub_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_5, %squeeze), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_7), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_6), kwargs = {}) # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_7), kwargs = {}) triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/od/cod7phlvthjbjrlnkoohuubyurn4lshuxh5mfkr4pfo6y6wto4h6.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_9), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_6 = async_compile.triton('triton_poi_fused_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/3m/c3mh4ag5y7d2kfw4id5vjhn3zjt2ucu33pwtmgndlspt4gg5cawj.py # Topologically Sorted Source Nodes: [src_2], Original ATen: [aten.add] # Source node to ATen node mapping: # src_2 => add_3 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %add_tensor), kwargs = {}) triton_poi_fused_add_7 = async_compile.triton('triton_poi_fused_add_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/5m/c5m2x4kwr66u6jzlkjcacrwhzqxhxsn3hv6ryzwol7bzp7uppnze.py # Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # src_3 => add_4, rsqrt_1, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [1]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) triton_poi_fused_native_layer_norm_8 = async_compile.triton('triton_poi_fused_native_layer_norm_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/pg/cpgskb56mehof5k52uslszbldka4jbq52y6dhbe764xtjdj3lwxc.py # Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # src_3 => add_4, add_5, mul_3, mul_4, rsqrt_1, sub_2, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [1]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_9), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_12), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_13), kwargs = {}) triton_poi_fused_native_layer_norm_9 = async_compile.triton('triton_poi_fused_native_layer_norm_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args args.clear() assert_size_stride(primals_1, (12, 4), (4, 1)) assert_size_stride(primals_2, (12, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (2048, 4), (4, 1)) assert_size_stride(primals_9, (2048, ), (1, )) assert_size_stride(primals_10, (4, 2048), (2048, 1)) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_5, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_2, (4, ), (1, ), 4), primals_5, reinterpret_tensor(primals_1, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: 
[multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_2, (4, ), (1, ), 8), primals_5, reinterpret_tensor(primals_1, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2) del primals_1 buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(buf3, primals_2, 16, grid=grid(16), stream=stream0) del primals_2 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0) buf6 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 64, grid=grid(64), stream=stream0) del buf5 buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf7, buf8, 4, 4, grid=grid(4, 4), stream=stream0) buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_4 buf10 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf11 = empty_strided_cuda((4, 1), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_4.run(primals_5, buf9, buf10, buf11, 4, grid=grid(4), stream=stream0) buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_5.run(primals_5, buf9, buf10, buf11, primals_6, primals_7, buf12, 16, grid=grid(16), stream=stream0) del primals_7 buf13 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf12, reinterpret_tensor(primals_8, (4, 2048), (1, 4), 0), out=buf13) buf14 = buf13; del buf13 # reuse # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] triton_poi_fused_relu_6.run(buf14, primals_9, 8192, grid=grid(8192), stream=stream0) del primals_9 buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf14, reinterpret_tensor(primals_10, (2048, 4), (1, 2048), 0), out=buf15) buf16 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [src_2], Original ATen: [aten.add] triton_poi_fused_add_7.run(buf16, buf12, primals_11, 16, grid=grid(16), stream=stream0) del primals_11 buf17 = buf11; del buf11 # reuse 
buf18 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_8.run(buf16, buf17, buf18, 4, grid=grid(4), stream=stream0) buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_9.run(buf16, buf17, buf18, primals_12, primals_13, buf19, 16, grid=grid(16), stream=stream0) del buf17 del buf18 del primals_13 return (buf19, primals_5, primals_6, primals_12, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), buf9, buf12, buf14, buf16, primals_10, primals_8, primals_3, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
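The triton_poi_fused__softmax_1 / triton_poi_fused__softmax_2 pair above is the usual numerically stable two-pass softmax split; a plain-PyTorch sketch of the same decomposition:

import torch

x = torch.rand(4, 4, 4)
e = torch.exp(x - x.amax(dim=-1, keepdim=True))  # pass 1: subtract row max, exponentiate
y = e / e.sum(dim=-1, keepdim=True)              # pass 2: normalize by the row sum
assert torch.allclose(y, torch.softmax(x, dim=-1))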
import torch import torch.utils.data import torch.nn.functional as F import torch.nn as nn from torch.nn.init import xavier_uniform_ from torch.nn.init import constant_ from torch.nn.init import xavier_normal_ def _get_activation_fn(activation): if activation == 'relu': return F.relu elif activation == 'gelu': return F.gelu raise RuntimeError('activation should be relu/gelu, not {}'.format( activation)) class MultiheadAttention(nn.Module): __annotations__ = {'bias_k': torch._jit_internal.Optional[torch.Tensor], 'bias_v': torch._jit_internal.Optional[torch.Tensor]} __constants__ = ['q_proj_weight', 'k_proj_weight', 'v_proj_weight', 'in_proj_weight'] def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None): super(MultiheadAttention, self).__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self._qkv_same_embed_dim = (self.kdim == embed_dim and self.vdim == embed_dim) self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' if self._qkv_same_embed_dim is False: self.q_proj_weight = nn.Parameter(torch.Tensor(embed_dim, embed_dim)) self.k_proj_weight = nn.Parameter(torch.Tensor(embed_dim, self. kdim)) self.v_proj_weight = nn.Parameter(torch.Tensor(embed_dim, self. vdim)) self.register_parameter('in_proj_weight', None) else: self.in_proj_weight = nn.Parameter(torch.empty(3 * embed_dim, embed_dim)) self.register_parameter('q_proj_weight', None) self.register_parameter('k_proj_weight', None) self.register_parameter('v_proj_weight', None) if bias: self.in_proj_bias = nn.Parameter(torch.empty(3 * embed_dim)) else: self.register_parameter('in_proj_bias', None) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) if add_bias_kv: self.bias_k = nn.Parameter(torch.empty(1, 1, embed_dim)) self.bias_v = nn.Parameter(torch.empty(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self._reset_parameters() def _reset_parameters(self): if self._qkv_same_embed_dim: xavier_uniform_(self.in_proj_weight) else: xavier_uniform_(self.q_proj_weight) xavier_uniform_(self.k_proj_weight) xavier_uniform_(self.v_proj_weight) if self.in_proj_bias is not None: constant_(self.in_proj_bias, 0.0) constant_(self.out_proj.bias, 0.0) if self.bias_k is not None: xavier_normal_(self.bias_k) if self.bias_v is not None: xavier_normal_(self.bias_v) def __setstate__(self, state): if '_qkv_same_embed_dim' not in state: state['_qkv_same_embed_dim'] = True super(MultiheadAttention, self).__setstate__(state) def forward(self, query, key, value, key_padding_mask=None, need_weights=True, attn_mask=None): if not self._qkv_same_embed_dim: return F.multi_head_attention_forward(query, key, value, self. embed_dim, self.num_heads, self.in_proj_weight, self. in_proj_bias, self.bias_k, self.bias_v, self.add_zero_attn, self.dropout, self.out_proj.weight, self.out_proj.bias, training=self.training, key_padding_mask=key_padding_mask, need_weights=need_weights, attn_mask=attn_mask, use_separate_proj_weight=True, q_proj_weight=self. q_proj_weight, k_proj_weight=self.k_proj_weight, v_proj_weight=self.v_proj_weight) else: return F.multi_head_attention_forward(query, key, value, self. embed_dim, self.num_heads, self.in_proj_weight, self. 
in_proj_bias, self.bias_k, self.bias_v, self.add_zero_attn, self.dropout, self.out_proj.weight, self.out_proj.bias, training=self.training, key_padding_mask=key_padding_mask, need_weights=need_weights, attn_mask=attn_mask) class TransformerEncoderLayer(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu'): super(TransformerEncoderLayer, self).__init__() self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.activation = _get_activation_fn(activation) def __setstate__(self, state): if 'activation' not in state: state['activation'] = F.relu super(TransformerEncoderLayer, self).__setstate__(state) def forward(self, src, src_mask=None, src_key_padding_mask=None): src2 = self.self_attn(src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0] src = src + self.dropout1(src2) src = self.norm1(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) src = src + self.dropout2(src2) src = self.norm2(src) return src def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'nhead': 4}]
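A minimal forward-pass sketch for the reference layer, following the get_inputs/get_init_inputs helpers above; it assumes a PyTorch version whose multi_head_attention_forward accepts unbatched 2-D inputs, as those helpers imply, and calls eval() so the three dropout modules are inert:

import torch

layer = TransformerEncoderLayer(d_model=4, nhead=4)
layer.eval()  # disable dropout for deterministic output
src = torch.rand(4, 4)
out = layer(src)
print(out.shape)  # torch.Size([4, 4])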
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.utils.data import torch.nn.functional as F import torch.nn as nn from torch.nn.init import xavier_uniform_ from torch.nn.init import constant_ from torch.nn.init import xavier_normal_ assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, 
xmask) @triton.jit def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (12, 4), (4, 1)) assert_size_stride(primals_2, (12,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (2048, 4), (4, 1)) assert_size_stride(primals_9, (2048,), (1,)) assert_size_stride(primals_10, (4, 2048), (2048, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_5, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_2, (4,), (1,), 4), primals_5, reinterpret_tensor(primals_1, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_2, (4,), (1,), 8), primals_5, reinterpret_tensor(primals_1, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2) del primals_1 buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0) del buf0 get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](buf3, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) 
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf5 buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) triton_poi_fused_clone_3[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0) del buf7 extern_kernels.addmm(primals_4, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha =1, beta=1, out=buf9) del primals_4 buf10 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf11 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused_add_native_layer_norm_4[grid(4)](primals_5, buf9, buf10, buf11, 4, XBLOCK=4, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_5, buf9, buf10, buf11, primals_6, primals_7, buf12, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf13 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) extern_kernels.mm(buf12, reinterpret_tensor(primals_8, (4, 2048), ( 1, 4), 0), out=buf13) buf14 = buf13 del buf13 triton_poi_fused_relu_6[grid(8192)](buf14, primals_9, 8192, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf14, reinterpret_tensor(primals_10, (2048, 4), (1, 2048), 0), out=buf15) buf16 = buf15 del buf15 triton_poi_fused_add_7[grid(16)](buf16, buf12, primals_11, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_11 buf17 = buf11 del buf11 buf18 = buf10 del buf10 triton_poi_fused_native_layer_norm_8[grid(4)](buf16, buf17, buf18, 4, XBLOCK=4, num_warps=1, num_stages=1) buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_9[grid(16)](buf16, buf17, buf18, primals_12, primals_13, buf19, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf17 del buf18 del primals_13 return (buf19, primals_5, primals_6, primals_12, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), buf9, buf12, buf14, buf16, primals_10, primals_8, primals_3, reinterpret_tensor(buf2, ( 4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0)) def _get_activation_fn(activation): if activation == 'relu': return F.relu elif activation == 'gelu': return F.gelu raise RuntimeError('activation should be relu/gelu, not {}'.format( activation)) class MultiheadAttention(nn.Module): __annotations__ = {'bias_k': torch._jit_internal.Optional[torch.Tensor], 'bias_v': torch._jit_internal.Optional[torch.Tensor]} __constants__ = ['q_proj_weight', 'k_proj_weight', 'v_proj_weight', 'in_proj_weight'] def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None): super(MultiheadAttention, self).__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self._qkv_same_embed_dim = (self.kdim == embed_dim and self.vdim == embed_dim) self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' if self._qkv_same_embed_dim is 
False: self.q_proj_weight = nn.Parameter(torch.Tensor(embed_dim, embed_dim)) self.k_proj_weight = nn.Parameter(torch.Tensor(embed_dim, self. kdim)) self.v_proj_weight = nn.Parameter(torch.Tensor(embed_dim, self. vdim)) self.register_parameter('in_proj_weight', None) else: self.in_proj_weight = nn.Parameter(torch.empty(3 * embed_dim, embed_dim)) self.register_parameter('q_proj_weight', None) self.register_parameter('k_proj_weight', None) self.register_parameter('v_proj_weight', None) if bias: self.in_proj_bias = nn.Parameter(torch.empty(3 * embed_dim)) else: self.register_parameter('in_proj_bias', None) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) if add_bias_kv: self.bias_k = nn.Parameter(torch.empty(1, 1, embed_dim)) self.bias_v = nn.Parameter(torch.empty(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self._reset_parameters() def _reset_parameters(self): if self._qkv_same_embed_dim: xavier_uniform_(self.in_proj_weight) else: xavier_uniform_(self.q_proj_weight) xavier_uniform_(self.k_proj_weight) xavier_uniform_(self.v_proj_weight) if self.in_proj_bias is not None: constant_(self.in_proj_bias, 0.0) constant_(self.out_proj.bias, 0.0) if self.bias_k is not None: xavier_normal_(self.bias_k) if self.bias_v is not None: xavier_normal_(self.bias_v) def __setstate__(self, state): if '_qkv_same_embed_dim' not in state: state['_qkv_same_embed_dim'] = True super(MultiheadAttention, self).__setstate__(state) def forward(self, query, key, value, key_padding_mask=None, need_weights=True, attn_mask=None): if not self._qkv_same_embed_dim: return F.multi_head_attention_forward(query, key, value, self. embed_dim, self.num_heads, self.in_proj_weight, self. in_proj_bias, self.bias_k, self.bias_v, self.add_zero_attn, self.dropout, self.out_proj.weight, self.out_proj.bias, training=self.training, key_padding_mask=key_padding_mask, need_weights=need_weights, attn_mask=attn_mask, use_separate_proj_weight=True, q_proj_weight=self. q_proj_weight, k_proj_weight=self.k_proj_weight, v_proj_weight=self.v_proj_weight) else: return F.multi_head_attention_forward(query, key, value, self. embed_dim, self.num_heads, self.in_proj_weight, self. 
in_proj_bias, self.bias_k, self.bias_v, self.add_zero_attn,
                self.dropout, self.out_proj.weight, self.out_proj.bias,
                training=self.training, key_padding_mask=key_padding_mask,
                need_weights=need_weights, attn_mask=attn_mask)


class TransformerEncoderLayerNew(nn.Module):

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
        activation='relu'):
        super(TransformerEncoderLayerNew, self).__init__()
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)

    def __setstate__(self, state):
        if 'activation' not in state:
            state['activation'] = F.relu
        super(TransformerEncoderLayerNew, self).__setstate__(state)

    def forward(self, input_0):
        # Parameter-to-primal mapping follows the argument order consumed by
        # call(): primals_6/primals_7 are the norm1 scale/shift used in
        # triton_poi_fused_add_native_layer_norm_5, and primals_11 is the
        # linear2 bias added in triton_poi_fused_add_7.
        primals_1 = self.self_attn.in_proj_weight
        primals_2 = self.self_attn.in_proj_bias
        primals_3 = self.self_attn.out_proj.weight
        primals_4 = self.self_attn.out_proj.bias
        primals_8 = self.linear1.weight
        primals_9 = self.linear1.bias
        primals_10 = self.linear2.weight
        primals_11 = self.linear2.bias
        primals_6 = self.norm1.weight
        primals_7 = self.norm1.bias
        primals_12 = self.norm2.weight
        primals_13 = self.norm2.bias
        primals_5 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13])
        return output[0]
codeboy5/cvpr20-scatter-text-recognizer
TransformerEncoderLayer
false
15,071
[ "Apache-2.0" ]
63
4bd6cfbd4d7f64ce11864514f6b6b0646267c285
https://github.com/codeboy5/cvpr20-scatter-text-recognizer/tree/4bd6cfbd4d7f64ce11864514f6b6b0646267c285
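# --- Usage sketch (not part of the generated dump) ---------------------------
# A minimal smoke test for TransformerEncoderLayerNew above. d_model=4 and
# nhead=4 are assumptions inferred from the (12, 4) in_proj weight and (4, 4)
# input shapes asserted in call(); the layer is put in eval() mode because the
# fused forward path omits the dropout layers.
import torch

layer = TransformerEncoderLayerNew(d_model=4, nhead=4).cuda().eval()
x = torch.rand(4, 4, device='cuda')
with torch.no_grad():
    y = layer(x)
print(y.shape)  # torch.Size([4, 4])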
_MLP_B
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/do/cdo22no4lmipk7byduyah2xsadvdcbfr22puoptl5br3l66r6jra.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # out_2 => gt, mul, where # Graph fragment: # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.01), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {}) triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (2, 4), (4, 1)) assert_size_stride(primals_5, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_0.run(buf0, primals_3, buf1, buf2, 256, grid=grid(256), stream=stream0) del buf0 del primals_3 buf3 = empty_strided_cuda((64, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 return (reinterpret_tensor(buf3, (4, 4, 4, 2), (32, 8, 2, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class _MLP_B(nn.Module): """MLP that only use age gender MMSE""" def __init__(self, in_size, drop_rate, fil_num): super(_MLP_B, self).__init__() self.fc1 = nn.Linear(in_size, fil_num) self.fc2 = nn.Linear(fil_num, 2) self.do1 = nn.Dropout(drop_rate) self.do2 = nn.Dropout(drop_rate) self.ac1 = nn.LeakyReLU() def forward(self, X): out = self.do1(X) out = self.fc1(out) out = self.ac1(out) out = self.do2(out) out = self.fc2(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_size': 4, 'drop_rate': 0.5, 'fil_num': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (2, 4), (4, 1)) assert_size_stride(primals_5, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_3, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_3 buf3 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 return reinterpret_tensor(buf3, (4, 4, 4, 2), (32, 8, 2, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), primals_4 class _MLP_BNew(nn.Module): """MLP that only use age gender MMSE""" def __init__(self, in_size, drop_rate, fil_num): super(_MLP_BNew, self).__init__() self.fc1 = nn.Linear(in_size, fil_num) self.fc2 = nn.Linear(fil_num, 2) self.do1 = nn.Dropout(drop_rate) self.do2 = nn.Dropout(drop_rate) self.ac1 = nn.LeakyReLU() def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
colorfulbrain/brain2020
_MLP_B
false
15,072
[ "MIT" ]
91
1dde5d34fd2ba1f38bcc38f2c973d167c8c3a168
https://github.com/colorfulbrain/brain2020/tree/1dde5d34fd2ba1f38bcc38f2c973d167c8c3a168
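# --- Parity sketch (not part of the generated dump) --------------------------
# Compares the eager _MLP_B against the compiled _MLP_BNew with shared
# weights. Both modules are put in eval() mode, since the fused graph drops
# the dropout ops; the 1e-6 tolerance is an assumption chosen to absorb
# mm/addmm reassociation differences.
import torch

eager = _MLP_B(in_size=4, drop_rate=0.5, fil_num=4).cuda().eval()
fused = _MLP_BNew(in_size=4, drop_rate=0.5, fil_num=4).cuda().eval()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    print(torch.allclose(eager(x), fused(x), atol=1e-6))  # expected: True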
Context2AnswerAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/3v/c3v7n6hzyrv5pn6uojl3hf6tko347a672spakigdzmqm7ebd4zwl.py # Topologically Sorted Source Nodes: [context_fc], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # context_fc => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 
tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py # Topologically Sorted Source Nodes: [prob], Original ATen: [aten._softmax] # Source node to ATen node mapping: # prob => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_6, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_6, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py # Topologically Sorted Source Nodes: [prob], Original ATen: [aten._softmax] # Source node to ATen node mapping: # prob => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = 
call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf1) del primals_1 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [context_fc], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf2, buf9, 
256, grid=grid(256), stream=stream0) buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [questions_fc], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, buf8, 256, grid=grid(256), stream=stream0) buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [prob], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [prob], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [emb], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_4, (16, 4, 4), (16, 4, 1), 0), out=buf7) del buf6 return (reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf4, reinterpret_tensor(primals_4, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), buf8, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.multiprocessing import torch.utils.data import torch.nn.modules.loss class Context2AnswerAttention(nn.Module): def __init__(self, dim, hidden_size): super(Context2AnswerAttention, self).__init__() self.linear_sim = nn.Linear(dim, hidden_size, bias=False) def forward(self, context, answers, out_answers, mask_answers=None): """ Parameters :context, (B, L, dim) :answers, (B, N, dim) :mask, (L, N) Returns :ques_emb, (L, dim) """ context_fc = torch.relu(self.linear_sim(context)) questions_fc = torch.relu(self.linear_sim(answers)) attention = torch.matmul(context_fc, questions_fc.transpose(-1, -2)) if mask_answers is not None: attention = attention.masked_fill(~mask_answers.unsqueeze(1). bool(), -Constants.INF) prob = torch.softmax(attention, dim=-1) emb = torch.matmul(prob, out_answers) return emb def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.multiprocessing import torch.utils.data import torch.nn.modules.loss assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf1) del primals_1 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) 
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf2, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0) del buf5 extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_4, (16, 4, 4), (16, 4, 1), 0), out=buf7) del buf6 return reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf4, reinterpret_tensor(primals_4, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), buf8, buf9 class Context2AnswerAttentionNew(nn.Module): def __init__(self, dim, hidden_size): super(Context2AnswerAttentionNew, self).__init__() self.linear_sim = nn.Linear(dim, hidden_size, bias=False) def forward(self, input_0, input_1, input_2): primals_1 = self.linear_sim.weight primals_2 = input_0 primals_3 = input_1 primals_4 = input_2 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
cminusQAQ/graph4nlp
Context2AnswerAttention
false
15,073
[ "Apache-2.0" ]
1,269
d980e897131f1b9d3766750c06316d94749904fa
https://github.com/cminusQAQ/graph4nlp/tree/d980e897131f1b9d3766750c06316d94749904fa
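# --- Reference sketch (not part of the generated dump) -----------------------
# triton_poi_fused__softmax_1/_2 above split softmax into the standard
# numerically stable two passes: pass 1 subtracts the per-row max and
# exponentiates, pass 2 normalizes by the per-row sum. The plain-PyTorch
# equivalent, for reference:
import torch

def two_pass_softmax(x):
    shifted = x - x.amax(dim=-1, keepdim=True)    # pass 1: subtract row max
    expd = shifted.exp()                          # pass 1: exponentiate
    return expd / expd.sum(dim=-1, keepdim=True)  # pass 2: normalize

x = torch.rand(4, 4, 4, 4)
assert torch.allclose(two_pass_softmax(x), torch.softmax(x, dim=-1), atol=1e-6)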
HardMish
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/tw/ctw5blnsowp77ws5q2hhofrkj4pqx7shwotcrzfadzfujngnzob7.py # Topologically Sorted Source Nodes: [truediv, add, clamp, mul], Original ATen: [aten.div, aten.add, aten.clamp, aten.mul] # Source node to ATen node mapping: # add => add # clamp => clamp_max, clamp_min # mul => mul # truediv => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, 2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 2), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %clamp_max), kwargs = {}) triton_poi_fused_add_clamp_div_mul_0 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': 
False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 2.0 tmp4 = tmp0 + tmp3 tmp5 = 0.0 tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = triton_helpers.minimum(tmp6, tmp3) tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [truediv, add, clamp, mul], Original ATen: [aten.div, aten.add, aten.clamp, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_clamp_div_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class HardMish(nn.Module): def __init__(self): super().__init__() def forward(self, x): return x / 2 * torch.clamp(x + 2, min=0, max=2) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_clamp_div_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 2.0 tmp4 = tmp0 + tmp3 tmp5 = 0.0 tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = triton_helpers.minimum(tmp6, tmp3) tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_clamp_div_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class HardMishNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
cooked-sashimi/Yet-Another-YOLOv4-Pytorch
HardMish
false
15,074
[ "MIT" ]
133
c884ef8849987a75b0e17eba1b739c22d3782e90
https://github.com/cooked-sashimi/Yet-Another-YOLOv4-Pytorch/tree/c884ef8849987a75b0e17eba1b739c22d3782e90
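# --- Reference sketch (not part of the generated dump) -----------------------
# The fused HardMish kernel folds the division by 2 into a multiply by 0.5:
# out = (x * 0.5) * clamp(x + 2, 0, 2). Both forms are bit-identical in IEEE
# float arithmetic, since dividing by a power of two is exact.
import torch

x = torch.randn(4, 4, 4, 4)
eager = x / 2 * torch.clamp(x + 2, min=0, max=2)
fused = x * 0.5 * torch.clamp(x + 2, min=0, max=2)
print(torch.equal(eager, fused))  # True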
_MLP_C
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ie/ciettq2a3562jfpgfe75iig4ki2hbm6pmbwujlvp6mw26i2odufm.py # Topologically Sorted Source Nodes: [X], Original ATen: [aten.cat] # Source node to ATen node mapping: # X => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 8 x0 = xindex % 16 x2 = (xindex // 128) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + 
(64*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/t6/ct6tzc7qfnkzfnoub66azvnnyh3hy6gkxahfs6n3mwiip73xwian.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # out_2 => gt, mul, where # Graph fragment: # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.01), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {}) triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (2, 4), (4, 1)) assert_size_stride(primals_6, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) # 
Topologically Sorted Source Nodes: [X], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((128, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (128, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.bool) buf3 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf1, primals_4, buf2, buf3, 512, grid=grid(512), stream=stream0) del buf1 del primals_4 buf4 = empty_strided_cuda((128, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, reinterpret_tensor(buf3, (128, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_6 return (reinterpret_tensor(buf4, (4, 8, 4, 2), (64, 8, 2, 1), 0), reinterpret_tensor(buf0, (128, 4), (4, 1), 0), buf2, reinterpret_tensor(buf3, (128, 4), (4, 1), 0), primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class _MLP_C(nn.Module): """MLP that use DPMs from fcn and age, gender and MMSE""" def __init__(self, in_size, drop_rate, fil_num): super(_MLP_C, self).__init__() self.fc1 = nn.Linear(in_size, fil_num) self.fc2 = nn.Linear(fil_num, 2) self.do1 = nn.Dropout(drop_rate) self.do2 = nn.Dropout(drop_rate) self.ac1 = nn.LeakyReLU() def forward(self, X1, X2): X = torch.cat((X1, X2), 1) out = self.do1(X) out = self.fc1(out) out = self.ac1(out) out = self.do2(out) out = self.fc2(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_size': 4, 'drop_rate': 0.5, 'fil_num': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (2, 4), (4, 1)) assert_size_stride(primals_6, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((128, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (128, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.bool) buf3 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) triton_poi_fused_leaky_relu_1[grid(512)](buf1, primals_4, buf2, buf3, 512, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_4 buf4 = empty_strided_cuda((128, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf3, (128, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_6 return reinterpret_tensor(buf4, (4, 8, 4, 2), (64, 8, 2, 1), 0 ), reinterpret_tensor(buf0, (128, 4), (4, 1), 0 ), buf2, reinterpret_tensor(buf3, (128, 4), (4, 1), 0), primals_5 class _MLP_CNew(nn.Module): """MLP that use DPMs from fcn and age, gender and MMSE""" def __init__(self, in_size, drop_rate, fil_num): super(_MLP_CNew, self).__init__() self.fc1 = nn.Linear(in_size, fil_num) self.fc2 = nn.Linear(fil_num, 2) self.do1 = nn.Dropout(drop_rate) self.do2 = nn.Dropout(drop_rate) self.ac1 = nn.LeakyReLU() def 
forward(self, input_0, input_1): primals_3 = self.fc1.weight primals_4 = self.fc1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
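A note on the compiled wrapper above, with a minimal CPU sketch of ours (not part of the record): no dropout kernel appears in call(), presumably because the trace captured eval-mode dropout, which is the identity, and both Linear layers run as flat mm/addmm because nn.Linear only touches the last dimension.

import torch
import torch.nn as nn
import torch.nn.functional as F

# Minimal sketch: nn.Linear treats a (4, 8, 4, 4) tensor as 4*8*4 = 128
# independent rows of 4 features, which is exactly the (128, 4) view that
# call() feeds to extern_kernels.mm/addmm above.
fc1, fc2 = nn.Linear(4, 4), nn.Linear(4, 2)
x = torch.cat((torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)), 1)
ref = fc2(F.leaky_relu(fc1(x)))                                      # default slope 0.01
flat = fc2(F.leaky_relu(fc1(x.reshape(128, 4)))).reshape(4, 8, 4, 2)
assert torch.allclose(ref, flat, atol=1e-6)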
colorfulbrain/brain2020
_MLP_C
false
15,075
[ "MIT" ]
91
1dde5d34fd2ba1f38bcc38f2c973d167c8c3a168
https://github.com/colorfulbrain/brain2020/tree/1dde5d34fd2ba1f38bcc38f2c973d167c8c3a168
DarknetMish
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/pv/cpvfag6tbt6nhgi35vs4qvpphq46yqnsc27wknjmt3qwt3uqetaf.py # Topologically Sorted Source Nodes: [e, mul, mul_1, n, add_1, truediv, mul_2, mask], Original ATen: [aten.exp, aten.mul, aten.add, aten.div, aten.le] # Source node to ATen node mapping: # add_1 => add_1 # e => exp # mask => le # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # n => add # truediv => div # Graph fragment: # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %exp), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, 2), kwargs = {}) # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 2), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %div), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%arg0_1, -0.6), kwargs = {}) triton_poi_fused_add_div_exp_le_mul_0 = async_compile.triton('triton_poi_fused_add_div_exp_le_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_exp_le_mul_0', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_exp_le_mul_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl_math.exp(tmp0) tmp2 = tmp1 * tmp1 tmp3 = 2.0 tmp4 = tmp1 * tmp3 tmp5 = tmp2 + tmp4 tmp6 = tmp5 + tmp3 tmp7 = tmp5 / tmp6 tmp8 = tmp0 * tmp7 tmp9 = -0.6 tmp10 = tmp0 <= tmp9 tl.store(out_ptr0 + (x0), tmp5, xmask) tl.store(out_ptr1 + (x0), tmp8, xmask) tl.store(out_ptr2 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [e, mul, mul_1, n, add_1, truediv, mul_2, mask], Original ATen: [aten.exp, aten.mul, aten.add, aten.div, aten.le] stream0 = get_raw_stream(0) triton_poi_fused_add_div_exp_le_mul_0.run(arg0_1, buf0, buf1, buf2, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf1, buf2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
from torch import nn


class darknet_mish(torch.autograd.Function):
    """
    We can implement our own custom autograd Functions by subclassing
    torch.autograd.Function and implementing the forward and backward passes
    which operate on Tensors.
    """

    @staticmethod
    def forward(ctx, input):
        """
        In the forward pass we receive a Tensor containing the input and return
        a Tensor containing the output. ctx is a context object that can be used
        to stash information for backward computation. You can cache arbitrary
        objects for use in the backward pass using the ctx.save_for_backward
        method.
        """
        ctx.save_for_backward(input.clone())  # save a pristine copy: input is mutated in place below
        e = torch.exp(input)
        n = e * e + 2 * e
        mask = input <= -0.6
        input[mask] = (input * (n / (n + 2)))[mask]
        input[~mask] = (input - 2 * (input / (n + 2)))[~mask]
        return input

    @staticmethod
    def backward(ctx, grad_output):
        """
        In the backward pass we receive a Tensor containing the gradient of the loss
        with respect to the output, and we need to compute the gradient of the loss
        with respect to the input.
        """
        input, = ctx.saved_tensors
        sp = F.softplus(input)
        grad_sp = -torch.expm1(-sp)  # d/dx softplus(x) = sigmoid(x) = 1 - exp(-sp)
        tsp = torch.tanh(sp)
        grad_tsp = (1 - tsp * tsp) * grad_sp
        grad = input * grad_tsp + tsp
        return grad_output * grad  # chain rule: scale the local mish derivative


class DarknetMish(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return darknet_mish.apply(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
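Before the compiled version below, a note on the arithmetic (our sketch, not repo code): the two masked branches in forward are the same function written two ways.

import torch
import torch.nn.functional as F

# With n = e**(2x) + 2*e**x, tanh(softplus(x)) equals n / (n + 2), and
# x - 2*x/(n + 2) simplifies to x * n/(n + 2); the -0.6 mask merely selects
# whichever rearrangement of x * tanh(softplus(x)) gets evaluated.
x = torch.linspace(-5.0, 5.0, steps=101)
e = torch.exp(x)
n = e * e + 2 * e
branch_neg = x * (n / (n + 2))
branch_pos = x - 2 * (x / (n + 2))
ref = x * torch.tanh(F.softplus(x))
assert torch.allclose(branch_neg, ref, atol=1e-5)
assert torch.allclose(branch_pos, ref, atol=1e-5)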
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_exp_le_mul_0(in_ptr0, out_ptr0, out_ptr1,
    out_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl_math.exp(tmp0)
    tmp2 = tmp1 * tmp1
    tmp3 = 2.0
    tmp4 = tmp1 * tmp3
    tmp5 = tmp2 + tmp4
    tmp6 = tmp5 + tmp3
    tmp7 = tmp5 / tmp6
    tmp8 = tmp0 * tmp7
    tmp9 = -0.6
    tmp10 = tmp0 <= tmp9
    tl.store(out_ptr0 + x0, tmp5, xmask)
    tl.store(out_ptr1 + x0, tmp8, xmask)
    tl.store(out_ptr2 + x0, tmp10, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_add_div_exp_le_mul_0[grid(256)](arg0_1, buf0,
            buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf1, buf2, buf0


class darknet_mish(torch.autograd.Function):
    """
    We can implement our own custom autograd Functions by subclassing
    torch.autograd.Function and implementing the forward and backward passes
    which operate on Tensors.
    """

    @staticmethod
    def forward(ctx, input):
        """
        In the forward pass we receive a Tensor containing the input and return
        a Tensor containing the output. ctx is a context object that can be used
        to stash information for backward computation. You can cache arbitrary
        objects for use in the backward pass using the ctx.save_for_backward
        method.
        """
        ctx.save_for_backward(input.clone())  # save a pristine copy: input is mutated in place below
        e = torch.exp(input)
        n = e * e + 2 * e
        mask = input <= -0.6
        input[mask] = (input * (n / (n + 2)))[mask]
        input[~mask] = (input - 2 * (input / (n + 2)))[~mask]
        return input

    @staticmethod
    def backward(ctx, grad_output):
        """
        In the backward pass we receive a Tensor containing the gradient of the loss
        with respect to the output, and we need to compute the gradient of the loss
        with respect to the input.
        """
        input, = ctx.saved_tensors
        sp = F.softplus(input)
        grad_sp = -torch.expm1(-sp)  # d/dx softplus(x) = sigmoid(x) = 1 - exp(-sp)
        tsp = torch.tanh(sp)
        grad_tsp = (1 - tsp * tsp) * grad_sp
        grad = input * grad_tsp + tsp
        return grad_output * grad  # chain rule: scale the local mish derivative


class DarknetMishNew(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
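Hand-written gradients like the one in darknet_mish are easy to get subtly wrong, so here is a cross-check of that local-derivative formula against autograd on the smooth definition (a sketch of ours, not repo code):

import torch
import torch.nn.functional as F

# mish'(x) = tanh(sp) + x * sech(sp)**2 * sigmoid(x), with sp = softplus(x)
# and sigmoid(x) = 1 - exp(-sp) = -expm1(-sp).
x = torch.linspace(-4.0, 4.0, steps=81, requires_grad=True)
(x * torch.tanh(F.softplus(x))).sum().backward()

xd = x.detach()
sp = F.softplus(xd)
grad_sp = -torch.expm1(-sp)
tsp = torch.tanh(sp)
grad = xd * (1 - tsp * tsp) * grad_sp + tsp
assert torch.allclose(grad, x.grad, atol=1e-5)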
cooked-sashimi/Yet-Another-YOLOv4-Pytorch
DarknetMish
false
15,076
[ "MIT" ]
133
c884ef8849987a75b0e17eba1b739c22d3782e90
https://github.com/cooked-sashimi/Yet-Another-YOLOv4-Pytorch/tree/c884ef8849987a75b0e17eba1b739c22d3782e90
Tanh2
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/gy/cgya3aw4qvdn7u65ddx3hzcxo7cps3p536l46btftxxghv5myjts.py # Topologically Sorted Source Nodes: [tanh, add, truediv], Original ATen: [aten.tanh, aten.add, aten.div] # Source node to ATen node mapping: # add => add # tanh => tanh # truediv => div # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%arg0_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 2), kwargs = {}) triton_poi_fused_add_div_tanh_0 = async_compile.triton('triton_poi_fused_add_div_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = 1.0 tmp3 = tmp1 + tmp2 tmp4 = 0.5 tmp5 = tmp3 * tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tanh, add, truediv], Original ATen: [aten.tanh, aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch.nn as nn import torch.nn.parallel import torch.optim class Tanh2(nn.Module): def __init__(self): super(Tanh2, self).__init__() self.tanh = nn.Tanh() def forward(self, x): return (self.tanh(x) + 1) / 2 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
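A side note on the module above (an observation of ours, not from the repo): (tanh(x) + 1) / 2 is exactly sigmoid(2x), so Tanh2 is a [0, 1]-ranged gate with twice the input slope of a plain sigmoid.

import torch

# tanh(x) = 2*sigmoid(2x) - 1, hence (tanh(x) + 1) / 2 == sigmoid(2x).
x = torch.randn(1000)
assert torch.allclose((torch.tanh(x) + 1) / 2, torch.sigmoid(2 * x), atol=1e-6)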
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn import torch.nn.parallel import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = 1.0 tmp3 = tmp1 + tmp2 tmp4 = 0.5 tmp5 = tmp3 * tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class Tanh2New(nn.Module): def __init__(self): super(Tanh2New, self).__init__() self.tanh = nn.Tanh() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
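If this generated file is run on a machine with a CUDA device, a smoke test of its call() against the eager formula can look like the sketch below (our usage assumption, not generated code; call() empties the list it is given, so pass a fresh one each time):

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out, = call([x])  # the wrapper asserts this exact shape and stride
    assert torch.allclose(out, (torch.tanh(x) + 1) / 2, atol=1e-6)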
csyxwei/FFWM
Tanh2
false
15,077
[ "MIT" ]
83
d42c578cabe1b81c6b1bb0c3cb707b190fca3c68
https://github.com/csyxwei/FFWM/tree/d42c578cabe1b81c6b1bb0c3cb707b190fca3c68
SAM
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/zo/czobpmlyr5atbcpsuque6vcmk7nafmb3smtbzoqilz46drm7zbkm.py # Topologically Sorted Source Nodes: [spatial_features], Original ATen: [aten.convolution] # Source node to ATen node mapping: # spatial_features => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = 
tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7h/c7husn6r7wfamgfyf2k2iu5piakn3x2emjsjmxss2yr2i6zyv32b.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, %primals_3), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (x3), xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + (x3), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [spatial_features], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [spatial_features], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(buf1, primals_3, buf2, 256, grid=grid(256), stream=stream0) 
return (buf2, primals_1, primals_3, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class SAM(nn.Module): def __init__(self, in_channels): super().__init__() self.conv = nn.Conv2d(in_channels, out_channels=1, kernel_size=1) def forward(self, x): spatial_features = self.conv(x) attention = torch.sigmoid(spatial_features) return attention.expand_as(x) * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
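A shape-level usage sketch for SAM (assuming the class above is in scope; not repo code): the 1x1 conv collapses channels into a single map, sigmoid turns it into a per-pixel gate, and expand_as broadcasts that one map across every input channel.

import torch

sam = SAM(in_channels=4)
x = torch.rand(2, 4, 8, 8)
y = sam(x)
attn = torch.sigmoid(sam.conv(x))   # (2, 1, 8, 8), shared by all channels
assert y.shape == x.shape
assert torch.allclose(y, attn * x)  # expand_as is plain broadcasting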
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr1 + x3, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + x3, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_1[grid(256)](buf1, primals_3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf2, primals_1, primals_3, buf1 class SAMNew(nn.Module): def __init__(self, in_channels): super().__init__() self.conv = nn.Conv2d(in_channels, out_channels=1, kernel_size=1) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
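A reading of triton_poi_fused_mul_1 above (our interpretation): the kernel fuses the channel broadcast into the multiply by simply dropping the channel term from the attention load's address.

import torch

# For a contiguous (4, 4, 4, 4) output, x0 = xindex % 16 is the spatial
# offset and x2 = xindex // 64 the batch index, so in_ptr0[x0 + 16 * x2]
# never varies with the channel: 256 output elements read only 64 distinct
# attention values, i.e. one (4, 4) map per batch item.
xindex = torch.arange(256)
attn_idx = xindex % 16 + 16 * (xindex // 64)
assert attn_idx.unique().numel() == 64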
cooked-sashimi/Yet-Another-YOLOv4-Pytorch
SAM
false
15,078
[ "MIT" ]
133
c884ef8849987a75b0e17eba1b739c22d3782e90
https://github.com/cooked-sashimi/Yet-Another-YOLOv4-Pytorch/tree/c884ef8849987a75b0e17eba1b739c22d3782e90
GlobalAttentionGeneral
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/qi/cqinh332474qtv7bgen4bcfz2yfclns66jnudr7z7wmvlrgqoduc.py # Topologically Sorted Source Nodes: [targetT], Original ATen: [aten.clone, aten.transpose] # Source node to ATen node mapping: # targetT => clone # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) # %permute_5 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%clone, [0, 2, 1]), kwargs = {}) triton_poi_fused_clone_transpose_0 = async_compile.triton('triton_poi_fused_clone_transpose_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_transpose_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = 
tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex y2 = yindex % 4 y3 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x1 + (16*y0)), xmask & ymask) tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask) tl.store(out_ptr1 + (y2 + (4*x1) + (64*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_2 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/pm/cpmy57yidxxfl6wmlh5dsizlsat4uz6k43rz6t4r6h2u4z625i5l.py # Topologically Sorted Source Nodes: [attn_4], Original ATen: [aten.clone] # Source node to ATen node 
mapping: # attn_4 => clone_1 # Graph fragment: # %clone_1 : [num_users=3] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask) tmp1 = tl.load(in_ptr0 + ((4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2 + (16*y3)), tmp8, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 1), (16, 4, 1, 1)) buf1 = empty_strided_cuda((4, 16, 4), (64, 1, 16), torch.float32) buf6 = empty_strided_cuda((4, 4, 16), (64, 1, 4), torch.float32) # 
Topologically Sorted Source Nodes: [targetT], Original ATen: [aten.clone, aten.transpose] stream0 = get_raw_stream(0) triton_poi_fused_clone_transpose_0.run(primals_1, buf1, buf6, 16, 16, grid=grid(16, 16), stream=stream0) del primals_1 buf2 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [targetT, attn], Original ATen: [aten.clone, aten.bmm] extern_kernels.bmm(buf1, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf2) buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_4], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf3, buf4, 16, 16, grid=grid(16, 16), stream=stream0) buf5 = reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [weightedContext], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), buf4, out=buf5) return (reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_3, reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), buf2, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0), buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.parallel def conv1x1(in_planes, out_planes, bias=False): """1x1 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=bias) class GlobalAttentionGeneral(nn.Module): def __init__(self, idf, cdf): super(GlobalAttentionGeneral, self).__init__() self.conv_context = conv1x1(cdf, idf) self.sm = nn.Softmax(dim=1) self.mask = None def applyMask(self, mask): self.mask = mask def forward(self, input, context): """ input: batch x idf x ih x iw (queryL=ihxiw) context: batch x cdf x sourceL """ ih, iw = input.size(2), input.size(3) queryL = ih * iw batch_size, sourceL = context.size(0), context.size(2) target = input.view(batch_size, -1, queryL) targetT = torch.transpose(target, 1, 2).contiguous() sourceT = context.unsqueeze(3) sourceT = self.conv_context(sourceT).squeeze(3) attn = torch.bmm(targetT, sourceT) attn = attn.view(batch_size * queryL, sourceL) if self.mask is not None: mask = self.mask.repeat(queryL, 1) attn.data.masked_fill_(mask.data, -float('inf')) attn = self.sm(attn) attn = attn.view(batch_size, queryL, sourceL) attn = torch.transpose(attn, 1, 2).contiguous() weightedContext = torch.bmm(sourceT, attn) weightedContext = weightedContext.view(batch_size, -1, ih, iw) attn = attn.view(batch_size, -1, ih, iw) return weightedContext, attn def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'idf': 4, 'cdf': 4}]
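A shape walk-through for the module above (a sketch assuming the class is in scope; note that conv1x1 actually uses padding=0 despite its docstring):

import torch

attn_mod = GlobalAttentionGeneral(idf=4, cdf=4)
img = torch.rand(4, 4, 4, 4)             # batch x idf x ih x iw, queryL = 16
ctx = torch.rand(4, 4, 4)                # batch x cdf x sourceL, sourceL = 4
weighted, attn = attn_mod(img, ctx)
assert weighted.shape == (4, 4, 4, 4)    # context re-weighted per query pixel
assert attn.shape == (4, 4, 4, 4)        # batch x sourceL x ih x iw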
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex y2 = yindex % 4 y3 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x1 + 16 * y0), xmask & ymask) tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) tl.store(out_ptr1 + (y2 + 4 * x1 + 64 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr0 + (4 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2 + 16 * y3), tmp8, xmask & ymask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), primals_3, stride=(1, 1), padding= (0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0 ), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 1), (16, 4, 1, 1)) buf1 = empty_strided_cuda((4, 16, 4), (64, 1, 16), torch.float32) buf6 = empty_strided_cuda((4, 4, 16), (64, 1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_clone_transpose_0[grid(16, 16)](primals_1, buf1, buf6, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_1 buf2 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) extern_kernels.bmm(buf1, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf2) buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0) del buf1 triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_clone_2[grid(16, 16)](buf3, buf4, 16, 16, XBLOCK= 16, YBLOCK=16, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0) del buf3 extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), buf4, out=buf5) return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_3, reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), buf2, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0), buf6 def conv1x1(in_planes, out_planes, bias=False): """1x1 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=bias) class GlobalAttentionGeneralNew(nn.Module): def __init__(self, idf, cdf): super(GlobalAttentionGeneralNew, self).__init__() self.conv_context = conv1x1(cdf, idf) self.sm = nn.Softmax(dim=1) self.mask = None def applyMask(self, mask): self.mask = mask def forward(self, input_0, input_1): primals_3 = self.conv_context.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
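The two Triton kernels above split softmax across kernel boundaries: triton_poi_fused__softmax_1 produces exp(x - rowmax), and triton_poi_fused_clone_2 divides by the row sum while permuting the layout. A CPU sketch of the same decomposition (our reading, not generated code):

import torch

x = torch.randn(64, 4)
num = torch.exp(x - x.max(dim=1, keepdim=True).values)
out = num / num.sum(dim=1, keepdim=True)
assert torch.allclose(out, torch.softmax(x, dim=1), atol=1e-6)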
comtalyst/multi-gan-material-defects
GlobalAttentionGeneral
false
15,079
[ "MIT" ]
112
aa1c9d4b918b5b5ad7f5fe03fdceec91a66e1007
https://github.com/comtalyst/multi-gan-material-defects/tree/aa1c9d4b918b5b5ad7f5fe03fdceec91a66e1007
Attention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/oy/coyhsh2gonfcpjxzlrgxzb2cyfxeyqdlyipiltkiqn3cy2uxjzy4.py # Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # out => convolution # out_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 2 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/nl/cnlc6yjkbua5jkop4rrww37vaigeaa5rgiz5dbh7hjclxg6xrxjb.py # Topologically Sorted Source Nodes: [out_4, out_5], Original ATen: [aten.convolution, aten.sigmoid] # Source node to ATen node mapping: # out_4 => convolution_2 # out_5 => sigmoid # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x3), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (2, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (2, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (2, 2, 3, 3), (18, 9, 3, 1)) assert_size_stride(primals_5, (2, ), (1, )) assert_size_stride(primals_6, (4, 2, 1, 1), (2, 1, 1, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # 
Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 2, 4, 4), (32, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 128, grid=grid(128), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 2, 4, 4), (32, 16, 4, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 128, grid=grid(128), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [out_4, out_5], Original ATen: [aten.convolution, aten.sigmoid] triton_poi_fused_convolution_sigmoid_1.run(buf5, primals_7, 256, grid=grid(256), stream=stream0) del primals_7 return (buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((2, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((2, 2, 3, 3), (18, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 2, 1, 1), (2, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
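A minimal sketch of driving the generated wrapper by hand, assuming a CUDA device and the call() defined above in scope; the returned tuple carries the sigmoid output first, followed by the tensors Inductor keeps alive for the backward pass:

import torch
from torch._dynamo.testing import rand_strided

shapes = [((2, 4, 3, 3), (36, 9, 3, 1)), ((2,), (1,)),
          ((4, 4, 4, 4), (64, 16, 4, 1)), ((2, 2, 3, 3), (18, 9, 3, 1)),
          ((2,), (1,)), ((4, 2, 1, 1), (2, 1, 1, 1)), ((4,), (1,))]
args = [rand_strided(s, st, device='cuda:0', dtype=torch.float32) for s, st in shapes]
out, *saved_for_backward = call(args)  # call() empties the list it is given
assert out.shape == (4, 4, 4, 4)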
import torch from torch import nn class Attention(nn.Module): def __init__(self, in_channels): super(Attention, self).__init__() self.out_channels = int(in_channels / 2) self.conv1 = nn.Conv2d(in_channels, self.out_channels, kernel_size= 3, padding=1, stride=1) self.relu1 = nn.ReLU() self.conv2 = nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, padding=1, stride=1) self.relu2 = nn.ReLU() self.conv3 = nn.Conv2d(self.out_channels, 4, kernel_size=1, padding =0, stride=1) self.sigmod = nn.Sigmoid() def forward(self, x): out = self.conv1(x) out = self.relu1(out) out = self.conv2(out) out = self.relu2(out) out = self.conv3(out) out = self.sigmod(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 2 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x3, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (2, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (2, 2, 3, 3), (18, 9, 3, 1)) assert_size_stride(primals_5, (2,), (1,)) assert_size_stride(primals_6, (4, 2, 1, 1), (2, 1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 2, 4, 4), (32, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(128)](buf1, primals_2, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 2, 4, 4), (32, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(128)](buf3, primals_5, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_sigmoid_1[grid(256)](buf5, primals_7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 return buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf5 class AttentionNew(nn.Module): def __init__(self, in_channels): super(AttentionNew, self).__init__() self.out_channels = int(in_channels / 2) self.conv1 = nn.Conv2d(in_channels, self.out_channels, kernel_size= 3, padding=1, stride=1) self.relu1 = nn.ReLU() self.conv2 = nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, padding=1, stride=1) self.relu2 = nn.ReLU() self.conv3 = nn.Conv2d(self.out_channels, 4, kernel_size=1, padding =0, stride=1) self.sigmod = nn.Sigmoid() def forward(self, input_0): primals_1 = 
self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
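A minimal equivalence sketch, assuming both Attention and AttentionNew above are in scope on a CUDA machine; the two should agree because the compiled path runs the same convolutions with the same parameters:

import torch

def check_equivalence(atol=1e-6):
    torch.manual_seed(0)
    eager = Attention(in_channels=4).cuda()
    compiled = AttentionNew(in_channels=4).cuda()
    compiled.load_state_dict(eager.state_dict())  # identical parameters
    x = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        assert torch.allclose(eager(x), compiled(x), atol=atol)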
createnewdemo/SPANet
Attention
false
15,080
[ "BSD-3-Clause" ]
177
86cfb05d1778cf30142ef30692e995a5b7b59bb8
https://github.com/createnewdemo/SPANet/tree/86cfb05d1778cf30142ef30692e995a5b7b59bb8
Bottleneck
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/yw/cywcz4pxnzyvlsoydzxcj5pzlu3i5g7qgj7guhgyvlrzkngzehmv.py # Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.relu] # Source node to ATen node mapping: # input_2 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): 
primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf3, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) return (buf4, primals_1, primals_2, primals_3, primals_4, buf1, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
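The dilated 3x3 convolution above (padding=(2, 2), dilation=(2, 2)) preserves the 4x4 spatial size; a one-line check of the arithmetic:

def conv_out_size(h, k=3, s=1, p=2, d=2):
    # a dilated kernel spans d * (k - 1) + 1 input positions
    return (h + 2 * p - (d * (k - 1) + 1)) // s + 1

assert conv_out_size(4) == 4  # matches the (4, 4, 4, 4) asserted on buf2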
import torch from torch import nn from collections import OrderedDict class Bottleneck(nn.Module): def __init__(self, in_channels, out_channels): super(Bottleneck, self).__init__() m = OrderedDict() m['conv1'] = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False) m['relu1'] = nn.ReLU(True) m['conv2'] = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=2, bias=False, dilation=2) m['relu2'] = nn.ReLU(True) m['conv3'] = nn.Conv2d(out_channels, out_channels, kernel_size=1, bias=False) self.group1 = nn.Sequential(m) self.relu = nn.Sequential(nn.ReLU(True)) def forward(self, x): out = self.group1(x) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn from collections import OrderedDict assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](buf1, 256, XBLOCK=128, num_warps =4, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_relu_0[grid(256)](buf3, 256, XBLOCK=128, num_warps =4, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) return buf4, primals_1, primals_2, primals_3, primals_4, buf1, buf3 class BottleneckNew(nn.Module): def __init__(self, in_channels, out_channels): super(BottleneckNew, self).__init__() m = OrderedDict() m['conv1'] = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False) m['relu1'] = nn.ReLU(True) m['conv2'] = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=2, bias=False, dilation=2) m['relu2'] = nn.ReLU(True) m['conv3'] = nn.Conv2d(out_channels, out_channels, kernel_size=1, bias=False) self.group1 = nn.Sequential(m) self.relu = nn.Sequential(nn.ReLU(True)) def forward(self, input_0): primals_1 = self.group1.conv1.weight primals_3 = self.group1.conv2.weight primals_4 = self.group1.conv3.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
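A small sketch of the launch arithmetic: grid(256) with XBLOCK=128 resolves to a ceiling division over the block size, so two programs cover the tensor and xmask trims nothing here:

def num_programs(xnumel, xblock):
    # ceiling division, mirroring what grid(256) resolves to at launch
    return (xnumel + xblock - 1) // xblock

assert num_programs(256, 128) == 2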
createnewdemo/SPANet
Bottleneck
false
15,081
[ "BSD-3-Clause" ]
177
86cfb05d1778cf30142ef30692e995a5b7b59bb8
https://github.com/createnewdemo/SPANet/tree/86cfb05d1778cf30142ef30692e995a5b7b59bb8
FeatureExtractFF
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/76/c76b7pvliatf567rdp2oax5m3ndboc4zya7ykx425ffbg3x3o7oo.py # Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # input_2 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 960 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 
= xindex % 15 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (15, 4), (4, 1)) assert_size_stride(primals_2, (15, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 15), (15, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 15), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 15), (240, 60, 15, 1), 0); del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4, 15), (240, 60, 15, 1), torch.bool) # Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf2, 960, grid=grid(960), stream=stream0) del primals_2 return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((15, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((15, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
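reinterpret_tensor above is a zero-copy restride; a plain-PyTorch sketch of the same flatten-matmul-reshape pattern, with shapes matching the call() above:

import torch

x = torch.rand(4, 4, 4, 4)
flat = x.view(64, 4)                 # same storage, batch dims flattened
y = flat @ torch.rand(4, 15)         # the extern mm above works on this view
y4d = y.view(4, 4, 4, 15)            # strides (240, 60, 15, 1), no copy
assert y4d.data_ptr() == y.data_ptr()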
import torch import torch.utils.data import torch.nn as nn class FeatureExtractFF(nn.Module): def __init__(self, input_dim, hidden_sizes=(15,), activation_fn=nn.ReLU, **activation_args): super(FeatureExtractFF, self).__init__() self._in = input_dim self._hidden_sizes = hidden_sizes self._activation_fn = activation_fn self._activation_args = activation_args self.feature = nn.Sequential() hin = self._in for i, h in enumerate(self._hidden_sizes): self.feature.add_module(f'f_fc{i}', nn.Linear(hin, h)) self.feature.add_module(f'f_{activation_fn.__name__}{i}', activation_fn(**activation_args)) hin = h self._out_features = hin def forward(self, input_data): return self.feature(input_data) def extra_repr(self): return f'FC: {self._hidden_sizes}x{self._activation_fn.__name__}' def hidden_layer(self, index=0): return self.feature[index * 2] def output_size(self): return self._out_features def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 960 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 15 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (15, 4), (4, 1)) assert_size_stride(primals_2, (15,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 15), (15, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 15), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 15), (240, 60, 15, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 15), (240, 60, 15, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(960)](buf1, primals_2, buf2, 960, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2 class FeatureExtractFFNew(nn.Module): def __init__(self, input_dim, hidden_sizes=(15,), activation_fn=nn.ReLU, **activation_args): super(FeatureExtractFFNew, self).__init__() self._in = input_dim self._hidden_sizes = hidden_sizes self._activation_fn = activation_fn self._activation_args = activation_args self.feature = nn.Sequential() hin = self._in for i, h in enumerate(self._hidden_sizes): self.feature.add_module(f'f_fc{i}', nn.Linear(hin, h)) self.feature.add_module(f'f_{activation_fn.__name__}{i}', activation_fn(**activation_args)) hin = h self._out_features = hin def extra_repr(self): return f'FC: {self._hidden_sizes}x{self._activation_fn.__name__}' def hidden_layer(self, index=0): return self.feature[index * 2] def output_size(self): return self._out_features def forward(self, input_0): primals_1 = self.feature.f_fc0.weight primals_2 = self.feature.f_fc0.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
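What the fused kernel computes per element, restated in plain PyTorch (a reference sketch, not the generated code): bias add, ReLU, plus the boolean mask saved for threshold_backward:

import torch

def fused_relu_reference(mm_out, bias):
    out = torch.relu(mm_out + bias)  # tmp4: bias add fused with ReLU
    mask = out <= 0                  # tmp6: mask kept for the ReLU backward
    return out, mask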
criteo-research/pytorch-ada
FeatureExtractFF
false
15,082
[ "Apache-2.0" ]
68
4b8861ce1c12fc8a4391eb14a811459e3e8a074a
https://github.com/criteo-research/pytorch-ada/tree/4b8861ce1c12fc8a4391eb14a811459e3e8a074a
Conv2dWS
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/yj/cyjqxrdr34zdlpnaqepj4py4tvwh2ebdslxkfeu7skxqjn4syiak.py # Topologically Sorted Source Nodes: [mean, mean_1], Original ATen: [aten.mean] # Source node to ATen node mapping: # mean => mean # mean_1 => mean_1 # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1], True), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean, [2], True), kwargs = {}) triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = 
tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp9 = tl.load(in_ptr0 + (4 + x0 + (64*x1)), xmask) tmp10 = tl.load(in_ptr0 + (20 + x0 + (64*x1)), xmask) tmp12 = tl.load(in_ptr0 + (36 + x0 + (64*x1)), xmask) tmp14 = tl.load(in_ptr0 + (52 + x0 + (64*x1)), xmask) tmp18 = tl.load(in_ptr0 + (8 + x0 + (64*x1)), xmask) tmp19 = tl.load(in_ptr0 + (24 + x0 + (64*x1)), xmask) tmp21 = tl.load(in_ptr0 + (40 + x0 + (64*x1)), xmask) tmp23 = tl.load(in_ptr0 + (56 + x0 + (64*x1)), xmask) tmp27 = tl.load(in_ptr0 + (12 + x0 + (64*x1)), xmask) tmp28 = tl.load(in_ptr0 + (28 + x0 + (64*x1)), xmask) tmp30 = tl.load(in_ptr0 + (44 + x0 + (64*x1)), xmask) tmp32 = tl.load(in_ptr0 + (60 + x0 + (64*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = tmp15 / tmp7 tmp17 = tmp8 + tmp16 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp24 = tmp22 + tmp23 tmp25 = tmp24 / tmp7 tmp26 = tmp17 + tmp25 tmp29 = tmp27 + tmp28 tmp31 = tmp29 + tmp30 tmp33 = tmp31 + tmp32 tmp34 = tmp33 / tmp7 tmp35 = tmp26 + tmp34 tmp36 = tmp35 / tmp7 tl.store(out_ptr0 + (x2), tmp36, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/2p/c2pf6oyiaa4gjc32fbgpszs3fx6atdtcxl52qihchu3e7nkbbkjc.py # Topologically Sorted Source Nodes: [weight_mean, weight, var, add, sqrt, weight_1], Original ATen: [aten.mean, aten.sub, aten.var, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add => add # sqrt => sqrt # var => var # weight => sub # weight_1 => div # weight_mean => mean_2 # Graph fragment: # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean_1, [3], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean_2), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%view, [1]), kwargs = {correction: 1}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 2e-05), kwargs = {}) # %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %expand), kwargs = {}) triton_per_fused_add_div_mean_sqrt_sub_var_1 = async_compile.triton('triton_per_fused_add_div_mean_sqrt_sub_var_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_sqrt_sub_var_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 
'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_sqrt_sub_var_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tmp18 = tl.full([XBLOCK, 1], 64, tl.int32) tmp19 = tmp18.to(tl.float32) tmp20 = tmp17 / tmp19 tmp21 = tmp11 - tmp20 tmp22 = tmp21 * tmp21 tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp25 = tl.where(xmask, tmp23, 0) tmp26 = tl.sum(tmp25, 1)[:, None] tmp27 = 63.0 tmp28 = tmp26 / tmp27 tmp29 = 2e-05 tmp30 = tmp28 + tmp29 tmp31 = libdevice.sqrt(tmp30) tmp32 = 1e-05 tmp33 = tmp31 + tmp32 tmp34 = tmp10 / tmp33 tl.store(out_ptr0 + (r1 + (64*x0)), tmp10, xmask) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp31, xmask) tl.store(out_ptr1 + (r1 + (64*x0)), tmp34, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/k2/ck2mamkqpmuzem4n3p4ij6fmfpy2bcbblg6sx6wwslgqwuqq5ifh.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %div, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 
'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 4), (4, 16, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, mean_1], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_poi_fused_mean_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, ), (1, ), torch.float32) buf5 = buf3; del buf3 # reuse buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [weight_mean, weight, var, add, sqrt, weight_1], Original ATen: [aten.mean, aten.sub, aten.var, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_mean_sqrt_sub_var_1.run(buf5, primals_1, buf0, buf1, buf6, 4, 64, grid=grid(4), stream=stream0) del buf0 del buf1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(primals_3, buf6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 1, 1), (4, 1, 1, 1)) buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf8, primals_2, 16, grid=grid(16), stream=stream0) del primals_2 return (buf8, primals_1, primals_3, buf5, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
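The persistent reduction above divides the sum of squared deviations by 63.0, i.e. Bessel-corrected variance over the 64 weights of each filter; a quick check that this matches torch.var's unbiased default:

import torch

w = torch.randn(4, 64)                           # one row per output filter
centered = w - w.mean(dim=1, keepdim=True)
var_manual = (centered ** 2).sum(dim=1) / 63.0   # the kernel's tmp26 / 63.0
assert torch.allclose(var_manual, w.var(dim=1))  # unbiased by default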
import torch import torch.nn.functional as F from torch import nn class Conv2dWS(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super(Conv2dWS, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) def forward(self, x): weight = self.weight weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True ).mean(dim=3, keepdim=True) weight = weight - weight_mean std = torch.sqrt(torch.var(weight.view(weight.size(0), -1), dim=1) + 2e-05).view(-1, 1, 1, 1) + 1e-05 weight = weight / std.expand_as(weight) return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp9 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask) tmp12 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask) tmp14 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask) tmp18 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask) tmp19 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask) tmp21 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask) tmp23 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask) tmp27 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask) tmp28 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask) tmp30 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask) tmp32 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = tmp15 / tmp7 tmp17 = tmp8 + tmp16 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp24 = tmp22 + tmp23 tmp25 = tmp24 / tmp7 tmp26 = tmp17 + tmp25 tmp29 = tmp27 + tmp28 tmp31 = tmp29 + tmp30 tmp33 = tmp31 + tmp32 tmp34 = tmp33 / tmp7 tmp35 = tmp26 + tmp34 tmp36 = tmp35 / tmp7 tl.store(out_ptr0 + x2, tmp36, xmask) @triton.jit def triton_per_fused_add_div_mean_sqrt_sub_var_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tl.where(xmask, tmp11, 0) tmp14 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tmp18 = tl.full([XBLOCK, 1], 64, tl.int32) tmp19 = tmp18.to(tl.float32) tmp20 = tmp17 / tmp19 tmp21 = tmp11 - tmp20 tmp22 = tmp21 * tmp21 tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp25 = tl.where(xmask, tmp23, 0) tmp26 = tl.sum(tmp25, 1)[:, None] tmp27 = 63.0 tmp28 = tmp26 / tmp27 tmp29 = 2e-05 tmp30 = tmp28 + tmp29 tmp31 = libdevice.sqrt(tmp30) tmp32 = 1e-05 tmp33 = tmp31 + tmp32 tmp34 = tmp10 / tmp33 tl.store(out_ptr0 + (r1 + 64 * x0), tmp10, xmask) 
tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp31, xmask) tl.store(out_ptr1 + (r1 + 64 * x0), tmp34, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 4), (4, 16, 16, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4,), (1,), torch.float32) buf5 = buf3 del buf3 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_per_fused_add_div_mean_sqrt_sub_var_1[grid(4)](buf5, primals_1, buf0, buf1, buf6, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 buf7 = extern_kernels.convolution(primals_3, buf6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 1, 1), (4, 1, 1, 1)) buf8 = buf7 del buf7 triton_poi_fused_convolution_2[grid(16)](buf8, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf8, primals_1, primals_3, buf5, buf6 class Conv2dWSNew(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super(Conv2dWSNew, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
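A sketch of the weight-standardization math the fused kernel implements, mirroring Conv2dWS.forward above; standardized filters come out with numerically zero mean:

import torch

w = torch.randn(4, 4, 4, 4)
mean = w.mean(dim=(1, 2, 3), keepdim=True)   # the chained means collapse to this
centered = w - mean
std = torch.sqrt(centered.view(4, -1).var(dim=1) + 2e-05).view(-1, 1, 1, 1) + 1e-05
w_ws = centered / std
assert torch.allclose(w_ws.mean(dim=(1, 2, 3)), torch.zeros(4), atol=1e-5)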
cooked-sashimi/Yet-Another-YOLOv4-Pytorch
Conv2dWS
false
15,083
[ "MIT" ]
133
c884ef8849987a75b0e17eba1b739c22d3782e90
https://github.com/cooked-sashimi/Yet-Another-YOLOv4-Pytorch/tree/c884ef8849987a75b0e17eba1b739c22d3782e90
InnerProductModel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/jb/cjbh6emqja7rl4lagss2czst5sqfxza2dkqgllp2soznirengezy.py # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mv] # Source node to ATen node mapping: # linear => mul, sum_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %primals_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mv_0 = async_compile.triton('triton_poi_fused_mv_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mv_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mv_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (0)) tmp2 = 
tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1)) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (2)) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (3)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + (x0), tmp18, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mv] stream0 = get_raw_stream(0) triton_poi_fused_mv_0.run(primals_2, primals_1, buf0, 64, grid=grid(64), stream=stream0) del primals_1 return (reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
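The mv kernel above fully unrolls the length-4 reduction: four scalar weight loads broadcast across the block (tl.broadcast_to) and four multiply-adds per output element; the same computation in PyTorch:

import torch

x = torch.rand(64, 4)
w = torch.rand(4)
unrolled = x[:, 0] * w[0] + x[:, 1] * w[1] + x[:, 2] * w[2] + x[:, 3] * w[3]
assert torch.allclose(unrolled, torch.mv(x, w))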
import torch class InnerProductModel(torch.nn.Module): @staticmethod def is_valid_model_type(model_type): raise NotImplementedError @staticmethod def get_model_from_type(model_type): raise NotImplementedError @property def loss_criterion(self): return torch.nn.MSELoss() def __init__(self, n): super().__init__() self.layer = torch.nn.Linear(n, 1, bias=False) self.layer.weight.data = torch.arange(n, dtype=torch.float32) def forward(self, x): return self.layer(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mv_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 1) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + 2) tmp11 = tl.broadcast_to(tmp10, [XBLOCK]) tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + 3) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp3 = tmp0 * tmp2 tmp7 = tmp4 * tmp6 tmp8 = tmp3 + tmp7 tmp12 = tmp9 * tmp11 tmp13 = tmp8 + tmp12 tmp17 = tmp14 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + x0, tmp18, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused_mv_0[grid(64)](primals_2, primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 return reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), primals_2 class InnerProductModelNew(torch.nn.Module): @staticmethod def is_valid_model_type(model_type): raise NotImplementedError @staticmethod def get_model_from_type(model_type): raise NotImplementedError @property def loss_criterion(self): return torch.nn.MSELoss() def __init__(self, n): super().__init__() self.layer = torch.nn.Linear(n, 1, bias=False) self.layer.weight.data = torch.arange(n, dtype=torch.float32) def forward(self, input_0): primals_1 = self.layer.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
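A usage sketch, assuming the InnerProductModel definition above is in scope: with the weight fixed to arange(4) in __init__, the forward pass is a dot product of each trailing row with [0, 1, 2, 3], reshaped to (4, 4, 4) as output[0] is:

import torch

x = torch.rand(4, 4, 4, 4)
w = torch.arange(4, dtype=torch.float32)  # the weights set in __init__
ref = (x * w).sum(dim=-1)                 # shape (4, 4, 4)
model = InnerProductModel(4)
assert torch.allclose(ref, model(x))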
cuiboyuan/plato
InnerProductModel
false
15,084
[ "Apache-2.0" ]
135
260b785cbbf8588c92331d6343211ff72321f90e
https://github.com/cuiboyuan/plato/tree/260b785cbbf8588c92331d6343211ff72321f90e
Myloss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/mk/cmkul7i5ksf46ar3uswera5oprn3aapolpbjdhllcml5ahczprkj.py # Topologically Sorted Source Nodes: [neg, add, log, mul, sub, sub_1, add_1, log_1, mul_1, entropy, mul_2, sum_1, truediv], Original ATen: [aten.neg, aten.add, aten.log, aten.mul, aten.rsub, aten.sub, aten.sum, aten.div] # Source node to ATen node mapping: # add => add # add_1 => add_1 # entropy => sub_2 # log => log # log_1 => log_1 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # neg => neg # sub => sub # sub_1 => sub_1 # sum_1 => sum_1 # truediv => div # Graph fragment: # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, 1e-08), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %log), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, 1e-08), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %log_1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %arg2_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_2,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 2), kwargs = {}) triton_per_fused_add_div_log_mul_neg_rsub_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_log_mul_neg_rsub_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, 
triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_log_mul_neg_rsub_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_log_mul_neg_rsub_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp2 = tl.load(in_ptr1 + (r0), None) tmp14 = tl.load(in_ptr2 + (r0), None) tmp1 = -tmp0 tmp3 = 1e-08 tmp4 = tmp2 + tmp3 tmp5 = tl_math.log(tmp4) tmp6 = tmp1 * tmp5 tmp7 = 1.0 tmp8 = tmp7 - tmp0 tmp9 = tmp7 - tmp2 tmp10 = tmp9 + tmp3 tmp11 = tl_math.log(tmp10) tmp12 = tmp8 * tmp11 tmp13 = tmp6 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = 0.5 tmp20 = tmp18 * tmp19 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp20, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [neg, add, log, mul, sub, sub_1, add_1, log_1, mul_1, entropy, mul_2, sum_1, truediv], Original ATen: [aten.neg, aten.add, aten.log, aten.mul, aten.rsub, aten.sub, aten.sum, aten.div] stream0 = get_raw_stream(0) triton_per_fused_add_div_log_mul_neg_rsub_sub_sum_0.run(buf1, arg0_1, arg1_1, arg2_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 
4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Myloss(nn.Module):

    def __init__(self, epsilon=1e-08):
        super(Myloss, self).__init__()
        self.epsilon = epsilon

    def forward(self, input_, label, weight):
        entropy = (-label * torch.log(input_ + self.epsilon)
                   - (1 - label) * torch.log(1 - input_ + self.epsilon))
        return torch.sum(entropy * weight) / 2


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
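Up to the 1e-08 stabiliser, this forward is a weighted binary cross-entropy summed over all elements and halved. A minimal eager sketch (not part of the dump; inputs are clamped away from 0 and 1 so the built-in log stays finite) checking that equivalence:

import torch
import torch.nn.functional as F

inp = torch.rand(4, 4, 4, 4).clamp(0.01, 0.99)   # keep log() finite in both forms
label = torch.rand(4, 4, 4, 4)
weight = torch.rand(4, 4, 4, 4)

eps = 1e-08
mine = torch.sum((-label * torch.log(inp + eps)
                  - (1 - label) * torch.log(1 - inp + eps)) * weight) / 2
ref = F.binary_cross_entropy(inp, label, weight=weight, reduction='sum') / 2
print(torch.allclose(mine, ref, atol=1e-3))      # True up to the eps stabiliser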
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_log_mul_neg_rsub_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr1 + r0, None) tmp14 = tl.load(in_ptr2 + r0, None) tmp1 = -tmp0 tmp3 = 1e-08 tmp4 = tmp2 + tmp3 tmp5 = tl_math.log(tmp4) tmp6 = tmp1 * tmp5 tmp7 = 1.0 tmp8 = tmp7 - tmp0 tmp9 = tmp7 - tmp2 tmp10 = tmp9 + tmp3 tmp11 = tl_math.log(tmp10) tmp12 = tmp8 * tmp11 tmp13 = tmp6 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = 0.5 tmp20 = tmp18 * tmp19 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_log_mul_neg_rsub_sub_sum_0[grid(1)](buf1, arg0_1, arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf1, class MylossNew(nn.Module): def __init__(self, epsilon=1e-08): super(MylossNew, self).__init__() self.epsilon = epsilon return def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
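The generated kernel follows Inductor's persistent-reduction pattern: grid(1) launches a single program instance that loads all 256 elements at once (RBLOCK = 256) and reduces them to one scalar, so no cross-block communication is needed. A stripped-down sketch of that pattern (assumes a CUDA device with Triton installed; the kernel and its names are illustrative, not taken from the dump):

import torch
import triton
import triton.language as tl

@triton.jit
def sum_all(in_ptr, out_ptr, RBLOCK: tl.constexpr):
    r = tl.arange(0, RBLOCK)           # one program covers the whole reduction
    x = tl.load(in_ptr + r)
    tl.store(out_ptr, tl.sum(x, 0))    # single scalar result

x = torch.rand(256, device='cuda')
out = torch.empty((), device='cuda')
sum_all[(1,)](x, out, RBLOCK=256)
print(torch.allclose(out, x.sum()))    # True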
cuishuhao/HDA
Myloss
false
15,085
[ "Apache-2.0" ]
58
1733ca74eee7839b455e9ffd7a169bc54b272745
https://github.com/cuishuhao/HDA/tree/1733ca74eee7839b455e9ffd7a169bc54b272745
AconC
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/te/ctevscmztmeqanhbazxgk27ctyy2bh4pbqsqpvoymuxkfobwmsus.py # Topologically Sorted Source Nodes: [sub], Original ATen: [aten.sub] # Source node to ATen node mapping: # sub => sub # Graph fragment: # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %primals_2), kwargs = {}) triton_poi_fused_sub_0 = async_compile.triton('triton_poi_fused_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = tmp0 - tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/4y/c4yb2uqsmfgwhifdn3lgvi3pjuiqdjy2tmjjh74dyzsjvym4fjkl.py # Topologically Sorted Source Nodes: [dpx, mul_1, sigmoid, mul_2, mul_3, add], Original ATen: [aten.mul, aten.sigmoid, aten.add] # Source node to ATen node mapping: # add => add # dpx => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # sigmoid => sigmoid # Graph fragment: # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_3), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %mul), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul_1,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %sigmoid), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %primals_3), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) triton_poi_fused_add_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x3), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp2 tmp5 = tl.sigmoid(tmp4) tmp6 = tmp2 * tmp5 tmp8 = tmp7 * tmp1 tmp9 = tmp6 + tmp8 tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) 
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [sub], Original ATen: [aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_sub_0.run(primals_1, primals_2, buf0, 4, grid=grid(4), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dpx, mul_1, sigmoid, mul_2, mul_3, add], Original ATen: [aten.mul, aten.sigmoid, aten.add] triton_poi_fused_add_mul_sigmoid_1.run(buf0, primals_3, primals_4, primals_2, buf1, 256, grid=grid(256), stream=stream0) del primals_2 return (buf1, primals_3, primals_4, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class AconC(nn.Module):
    """ACON activation (activate or not).

    AconC: (p1*x - p2*x) * sigmoid(beta*(p1*x - p2*x)) + p2*x, where beta is a
    learnable parameter, according to "Activate or Not: Learning Customized
    Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
    """

    def __init__(self, c1):
        super().__init__()
        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))

    def forward(self, x):
        dpx = (self.p1 - self.p2) * x
        return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'c1': 4}]
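Per the ACON paper cited in the docstring, beta controls how sharply the unit switches: as beta grows, AconC approaches max(p1*x, p2*x), and as beta shrinks it approaches the linear mean (p1 + p2)/2 * x. A small numerical sketch of both limits (scalar p1/p2 chosen arbitrarily for illustration):

import torch

x = torch.linspace(-3, 3, 7)
p1, p2 = 1.5, 0.2

def aconc(x, beta):
    dpx = (p1 - p2) * x
    return dpx * torch.sigmoid(beta * dpx) + p2 * x

print(torch.allclose(aconc(x, beta=1e4), torch.maximum(p1 * x, p2 * x)))   # beta -> inf: max
print(torch.allclose(aconc(x, beta=1e-4), (p1 + p2) / 2 * x, atol=1e-3))   # beta -> 0: mean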
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 - tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x3, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp2 tmp5 = tl.sigmoid(tmp4) tmp6 = tmp2 * tmp5 tmp8 = tmp7 * tmp1 tmp9 = tmp6 + tmp8 tl.store(out_ptr0 + x3, tmp9, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(4)](primals_1, primals_2, buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_1[grid(256)](buf0, primals_3, primals_4, primals_2, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, primals_3, primals_4, buf0 class AconCNew(nn.Module): """ ACON activation (activate or not). AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>. """ def __init__(self, c1): super().__init__() self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) self.beta = nn.Parameter(torch.ones(1, c1, 1, 1)) def forward(self, input_0): primals_1 = self.p1 primals_2 = self.p2 primals_4 = self.beta primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
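In the fused kernel, the per-channel parameters of shape (1, 4, 1, 1) are gathered with x1 = xindex // 16 % 4, which recovers the channel of a flat contiguous NCHW offset (here H*W = 16 and C = 4). A quick check of that index arithmetic:

import torch

flat = torch.arange(256)
channel = flat // 16 % 4
print(torch.equal(channel.view(4, 4, 4, 4),
                  torch.arange(4).view(1, 4, 1, 1).expand(4, 4, 4, 4)))   # True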
cuiboyuan/plato
AconC
false
15,086
[ "Apache-2.0" ]
135
260b785cbbf8588c92331d6343211ff72321f90e
https://github.com/cuiboyuan/plato/tree/260b785cbbf8588c92331d6343211ff72321f90e
ECA
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] # Source node to ATen node mapping: # mean => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [-1]), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 
(16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/a4/ca4pk5bet2ap5zhlcm7x5qhkcykaonjnkexgc274irmy5c3x7nr6.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, %primals_1), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (x2), xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 3), (3, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (4, 1, 4), (4, 0, 1), 0), primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 4), (4, 4, 1)) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] 
triton_poi_fused_mul_1.run(buf2, primals_1, buf3, 256, grid=grid(256), stream=stream0) return (buf3, primals_1, primals_2, reinterpret_tensor(buf1, (4, 1, 4), (4, 1, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 1, 3), (3, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class FastGlobalAvgPool2d:

    def __init__(self, flatten=False):
        self.flatten = flatten

    def __call__(self, x):
        if self.flatten:
            in_size = x.size()
            return x.view((in_size[0], in_size[1], -1)).mean(dim=2)
        else:
            return x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0),
                x.size(1), 1, 1)


class ECA(nn.Module):

    def __init__(self, k_size=3):
        super().__init__()
        self.avg_pool = FastGlobalAvgPool2d(flatten=False)
        self.conv = nn.Conv1d(1, 1, kernel_size=k_size,
            padding=(k_size - 1) // 2, bias=False)

    def forward(self, x):
        squeezed_channels = self.avg_pool(x)
        channel_features = self.conv(squeezed_channels.squeeze(-1)
            .transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)
        attention = torch.sigmoid(channel_features)
        return attention.expand_as(x) * x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
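The attention path squeezes each channel to a scalar, runs a k-sized 1-D convolution across the channel axis for local cross-channel interaction, and gates the input with a sigmoid. A shape walk-through on the same (4, 4, 4, 4) input (a sketch with hypothetical tensors; CPU is fine):

import torch
from torch import nn

x = torch.rand(4, 4, 4, 4)                                 # (B, C, H, W)
pooled = x.view(4, 4, -1).mean(-1)                         # (B, C): per-channel statistic
conv = nn.Conv1d(1, 1, kernel_size=3, padding=1, bias=False)
feat = conv(pooled.unsqueeze(1))                           # (B, 1, C): cross-channel mixing
gate = torch.sigmoid(feat).transpose(1, 2).unsqueeze(-1)   # (B, C, 1, 1)
print((gate * x).shape)                                    # torch.Size([4, 4, 4, 4])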
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + x2, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 3), (3, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (4, 1, 4 ), (4, 0, 1), 0), primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 4), (4, 4, 1)) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_1[grid(256)](buf2, primals_1, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf3, primals_1, primals_2, reinterpret_tensor(buf1, (4, 1, 4), (4, 1, 1), 0), buf2 class FastGlobalAvgPool2d: def __init__(self, flatten=False): self.flatten = flatten def __call__(self, x): if self.flatten: in_size = x.size() return x.view((in_size[0], in_size[1], -1)).mean(dim=2) else: return x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0), x.size(1), 1, 1) class ECANew(nn.Module): def __init__(self, k_size=3): super().__init__() self.avg_pool = FastGlobalAvgPool2d(flatten=False) self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1 ) // 2, bias=False) def forward(self, input_0): primals_2 = self.conv.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
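Only the elementwise pieces become Triton kernels in this graph; the conv1d is dispatched through extern_kernels.convolution, with buf1 reinterpreted as a (4, 1, 4) tensor. Its eager equivalent, as a sketch with the same shapes:

import torch
import torch.nn.functional as F

pooled = torch.rand(4, 1, 4)                  # buf1 reinterpreted as (B, 1, C)
weight = torch.rand(1, 1, 3)                  # primals_2
out = F.conv1d(pooled, weight, stride=1, padding=1)
print(out.shape)                              # torch.Size([4, 1, 4]) -- matches buf2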
cooked-sashimi/Yet-Another-YOLOv4-Pytorch
ECA
false
15,087
[ "MIT" ]
133
c884ef8849987a75b0e17eba1b739c22d3782e90
https://github.com/cooked-sashimi/Yet-Another-YOLOv4-Pytorch/tree/c884ef8849987a75b0e17eba1b739c22d3782e90
BinaryFocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/gt/cgtd5xj24ijmtujfbliico4vniipimootox5jtn7g3yeixybzukc.py # Topologically Sorted Source Nodes: [mul, input_1, sub, pow_1, mul_1, log, mul_2, sub_1, mul_3, pow_2, mul_4, sub_2, log_1, mul_5, sub_3, loss, loss_1], Original ATen: [aten.mul, aten.clamp, aten.rsub, aten.pow, aten.log, aten.sub, aten.sum, aten.div] # Source node to ATen node mapping: # input_1 => clamp_max, clamp_min # log => log # log_1 => log_1 # loss => sum_1 # loss_1 => div # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # pow_1 => pow_1 # pow_2 => pow_2 # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # sub_3 => sub_3 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, -0.25), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 1e-06), kwargs = {}) # %clamp_max : [num_users=4] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 0.999999), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %clamp_max), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %pow_1), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_max,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %log), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, 0.75), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%clamp_max, 2.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %pow_2), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %clamp_max), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = 
(%sub_2,), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %log_1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %mul_5), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_3,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 256), kwargs = {}) triton_per_fused_clamp_div_log_mul_pow_rsub_sub_sum_0 = async_compile.triton('triton_per_fused_clamp_div_log_mul_pow_rsub_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_clamp_div_log_mul_pow_rsub_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_clamp_div_log_mul_pow_rsub_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = -0.25 tmp2 = tmp0 * tmp1 tmp4 = 1e-06 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = 0.999999 tmp7 = triton_helpers.minimum(tmp5, tmp6) tmp8 = 1.0 tmp9 = tmp8 - tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp2 * tmp10 tmp12 = tl_math.log(tmp7) tmp13 = tmp11 * tmp12 tmp14 = tmp8 - tmp0 tmp15 = 0.75 tmp16 = tmp14 * tmp15 tmp17 = tmp7 * tmp7 tmp18 = tmp16 * tmp17 tmp19 = tl_math.log(tmp9) tmp20 = tmp18 * tmp19 tmp21 = tmp13 - tmp20 tmp22 = tl.broadcast_to(tmp21, [RBLOCK]) tmp24 = triton_helpers.promote_to_tensor(tl.sum(tmp22, 0)) tmp25 = 0.00390625 tmp26 = tmp24 * tmp25 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp26, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), 
torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mul, input_1, sub, pow_1, mul_1, log, mul_2, sub_1, mul_3, pow_2, mul_4, sub_2, log_1, mul_5, sub_3, loss, loss_1], Original ATen: [aten.mul, aten.clamp, aten.rsub, aten.pow, aten.log, aten.sub, aten.sum, aten.div] stream0 = get_raw_stream(0) triton_per_fused_clamp_div_log_mul_pow_rsub_sub_sum_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch as th
import torch.nn as nn


class BinaryFocalLoss(nn.Module):

    def __init__(self, gamma=2.0, alpha=0.25, size_average=True):
        super(BinaryFocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.size_average = size_average

    def forward(self, input, target, weight=None):
        if weight is not None:
            assert weight.size() == input.size(), \
                f'weight size: {weight.size()}, input size: {input.size()}'
            assert (weight >= 0).all() and (weight <= 1).all(), \
                f'weight max: {weight.max()}, min: {weight.min()}'
        input = input.clamp(1e-06, 1.0 - 1e-06)
        if weight is None:
            loss = th.sum(-self.alpha * target * (1 - input) ** self.gamma *
                th.log(input) - (1 - self.alpha) * (1 - target) * input **
                self.gamma * th.log(1 - input))
        else:
            loss = th.sum((-self.alpha * target * (1 - input) ** self.gamma *
                th.log(input) - (1 - self.alpha) * (1 - target) * input **
                self.gamma * th.log(1 - input)) * weight)
        if self.size_average:
            loss /= input.nelement()
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
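The fused kernel above hard-codes this module's defaults: -0.25 is -alpha, 0.75 is 1 - alpha, the repeated squarings implement gamma = 2.0, and the final multiply by 0.00390625 is 1/256 = 1/input.nelement() from size_average. A sketch reproducing the weightless branch eagerly:

import torch

inp = torch.rand(4, 4, 4, 4).clamp(1e-06, 1.0 - 1e-06)
tgt = torch.rand(4, 4, 4, 4)
alpha, gamma = 0.25, 2.0
loss = torch.sum(-alpha * tgt * (1 - inp) ** gamma * torch.log(inp)
                 - (1 - alpha) * (1 - tgt) * inp ** gamma * torch.log(1 - inp))
print(loss / inp.nelement())        # size_average divides by 256 elements
print(1 / 256 == 0.00390625)        # True: the kernel's final constant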
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_clamp_div_log_mul_pow_rsub_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = -0.25 tmp2 = tmp0 * tmp1 tmp4 = 1e-06 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = 0.999999 tmp7 = triton_helpers.minimum(tmp5, tmp6) tmp8 = 1.0 tmp9 = tmp8 - tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp2 * tmp10 tmp12 = tl_math.log(tmp7) tmp13 = tmp11 * tmp12 tmp14 = tmp8 - tmp0 tmp15 = 0.75 tmp16 = tmp14 * tmp15 tmp17 = tmp7 * tmp7 tmp18 = tmp16 * tmp17 tmp19 = tl_math.log(tmp9) tmp20 = tmp18 * tmp19 tmp21 = tmp13 - tmp20 tmp22 = tl.broadcast_to(tmp21, [RBLOCK]) tmp24 = triton_helpers.promote_to_tensor(tl.sum(tmp22, 0)) tmp25 = 0.00390625 tmp26 = tmp24 * tmp25 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp26, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_clamp_div_log_mul_pow_rsub_sub_sum_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class BinaryFocalLossNew(nn.Module): def __init__(self, gamma=2.0, alpha=0.25, size_average=True): super(BinaryFocalLossNew, self).__init__() self.gamma = gamma self.alpha = alpha self.size_average = size_average def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
cumtchenchang/PPGNet
BinaryFocalLoss
false
15,088
[ "MIT" ]
171
9b280aacb887ec584e905b9f9ab006b4f4cb2cc3
https://github.com/cumtchenchang/PPGNet/tree/9b280aacb887ec584e905b9f9ab006b4f4cb2cc3
BCL
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/jp/cjpvhqkd3jok4uhealf3kigj7a73jemeosb76xbly4kbrndkeu75.py # Topologically Sorted Source Nodes: [setitem, add_2, truediv, ne, mask, distance, pow_1, mul_1, sum_3, eq_1, float_2, sum_1, pos_num, loss_1, sub, truediv_2, mul_2, sub_1, clamp, pow_2, mul_3, sum_4, eq_2, float_3, sum_2, neg_num, loss_2, loss], Original ATen: [aten.lift_fresh, aten.index_put, aten.add, aten.div, aten.ne, aten._to_copy, aten.mul, aten.pow, aten.sum, aten.eq, aten.rsub, aten.clamp] # Source node to ATen node mapping: # add_2 => add_2 # clamp => clamp_min # distance => mul # eq_1 => eq_1 # eq_2 => eq_2 # float_2 => convert_element_type_1 # float_3 => convert_element_type_2 # loss => add_3 # loss_1 => div_1 # loss_2 => div_3 # mask => convert_element_type # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # ne => ne # neg_num => add_1 # pos_num => add # pow_1 => pow_1 # pow_2 => pow_2 # setitem => full_default, index_put # sub => sub # sub_1 => sub_1 # sum_1 => sum_1 # sum_2 => sum_2 # sum_3 => sum_3 # sum_4 => sum_4 # truediv => div # truediv_2 => div_2 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put : [num_users=5] = call_function[target=torch.ops.aten.index_put_.default](args = (%arg0_1, [%eq], %full_default), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%index_put, 1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, 2), kwargs = {}) # %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%index_put, 255), kwargs = {}) # %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.float32), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %convert_element_type), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul, 2), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %pow_1), kwargs = {}) # %sum_3 : 
[num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {}) # %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%index_put, 1), kwargs = {}) # %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq_1, torch.float32), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%convert_element_type_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 0.0001), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, %add), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %index_put), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, 2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %convert_element_type), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (2.0, %mul), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_1, 0.0), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%clamp_min, 2), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %pow_2), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_3,), kwargs = {}) # %eq_2 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%index_put, -1), kwargs = {}) # %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq_2, torch.float32), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%convert_element_type_2,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 0.0001), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_4, %add_1), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_1, %div_3), kwargs = {}) triton_per_fused__to_copy_add_clamp_div_eq_index_put_lift_fresh_mul_ne_pow_rsub_sum_0 = async_compile.triton('triton_per_fused__to_copy_add_clamp_div_eq_index_put_lift_fresh_mul_ne_pow_rsub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_clamp_div_eq_index_put_lift_fresh_mul_ne_pow_rsub_sum_0', 'mutated_arg_names': 
['in_out_ptr0', 'in_ptr0', 'out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__to_copy_add_clamp_div_eq_index_put_lift_fresh_mul_ne_pow_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp8 = tl.load(in_ptr1 + (r0), None) tmp1 = 255.0 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = tl.where(tmp2, tmp3, tmp0) tmp5 = tmp4 + tmp3 tmp6 = 0.5 tmp7 = tmp5 * tmp6 tmp9 = tmp4 != tmp1 tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 * tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp7 * tmp12 tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = tmp3 - tmp4 tmp18 = tmp17 * tmp6 tmp19 = tmp18 * tmp10 tmp20 = 2.0 tmp21 = tmp20 - tmp11 tmp22 = 0.0 tmp23 = triton_helpers.maximum(tmp21, tmp22) tmp24 = tmp23 * tmp23 tmp25 = tmp19 * tmp24 tmp26 = tl.broadcast_to(tmp25, [RBLOCK]) tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0)) tmp29 = tmp4 == tmp3 tmp30 = tmp29.to(tl.float32) tmp31 = tl.broadcast_to(tmp30, [RBLOCK]) tmp33 = triton_helpers.promote_to_tensor(tl.sum(tmp31, 0)) tmp34 = -1.0 tmp35 = tmp4 == tmp34 tmp36 = tmp35.to(tl.float32) tmp37 = tl.broadcast_to(tmp36, [RBLOCK]) tmp39 = triton_helpers.promote_to_tensor(tl.sum(tmp37, 0)) tmp40 = 0.0001 tmp41 = tmp33 + tmp40 tmp42 = tmp16 / tmp41 tmp43 = tmp39 + tmp40 tmp44 = tmp28 / tmp43 tmp45 = tmp42 + tmp44 tl.store(out_ptr0 + (tl.broadcast_to(r0, [RBLOCK])), tmp4, None) tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp45, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((), (), torch.float32) buf6 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [setitem, add_2, truediv, ne, mask, distance, pow_1, mul_1, sum_3, eq_1, float_2, sum_1, pos_num, loss_1, sub, truediv_2, mul_2, sub_1, clamp, pow_2, mul_3, sum_4, eq_2, float_3, sum_2, neg_num, loss_2, loss], Original ATen: [aten.lift_fresh, aten.index_put, aten.add, aten.div, aten.ne, aten._to_copy, aten.mul, aten.pow, aten.sum, aten.eq, aten.rsub, aten.clamp] stream0 = get_raw_stream(0) triton_per_fused__to_copy_add_clamp_div_eq_index_put_lift_fresh_mul_ne_pow_rsub_sum_0.run(buf6, arg0_1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = 
rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
from torch import nn


class BCL(nn.Module):
    """Batch-balanced contrastive loss.

    Label convention: no-change = 1, change = -1 (255 is remapped to 1).
    """

    def __init__(self, margin=2.0):
        super(BCL, self).__init__()
        self.margin = margin

    def forward(self, distance, label):
        label[label == 255] = 1
        mask = (label != 255).float()
        distance = distance * mask
        pos_num = torch.sum((label == 1).float()) + 0.0001
        neg_num = torch.sum((label == -1).float()) + 0.0001
        loss_1 = torch.sum((1 + label) / 2 * torch.pow(distance, 2)) / pos_num
        loss_2 = torch.sum((1 - label) / 2 * mask * torch.pow(torch.clamp(
            self.margin - distance, min=0.0), 2)) / neg_num
        loss = loss_1 + loss_2
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
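A small eager sketch of the two balanced terms on random ±1 labels (hypothetical inputs; note that after label[label == 255] = 1 no 255 entries remain, so for these inputs the (label != 255) mask is all ones):

import torch

distance = torch.rand(4, 4, 4, 4)
label = torch.randint(0, 2, (4, 4, 4, 4)).float() * 2 - 1    # random +/-1 labels
margin = 2.0
pos_num = torch.sum((label == 1).float()) + 0.0001
neg_num = torch.sum((label == -1).float()) + 0.0001
loss_1 = torch.sum((1 + label) / 2 * distance ** 2) / pos_num      # pull no-change pairs together
loss_2 = torch.sum((1 - label) / 2 *
                   torch.clamp(margin - distance, min=0.0) ** 2) / neg_num  # push change pairs past the margin
print(loss_1 + loss_2)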
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused__to_copy_add_clamp_div_eq_index_put_lift_fresh_mul_ne_pow_rsub_sum_0( in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp8 = tl.load(in_ptr1 + r0, None) tmp1 = 255.0 tmp2 = tmp0 == tmp1 tmp3 = 1.0 tmp4 = tl.where(tmp2, tmp3, tmp0) tmp5 = tmp4 + tmp3 tmp6 = 0.5 tmp7 = tmp5 * tmp6 tmp9 = tmp4 != tmp1 tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 * tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp7 * tmp12 tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = tmp3 - tmp4 tmp18 = tmp17 * tmp6 tmp19 = tmp18 * tmp10 tmp20 = 2.0 tmp21 = tmp20 - tmp11 tmp22 = 0.0 tmp23 = triton_helpers.maximum(tmp21, tmp22) tmp24 = tmp23 * tmp23 tmp25 = tmp19 * tmp24 tmp26 = tl.broadcast_to(tmp25, [RBLOCK]) tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0)) tmp29 = tmp4 == tmp3 tmp30 = tmp29.to(tl.float32) tmp31 = tl.broadcast_to(tmp30, [RBLOCK]) tmp33 = triton_helpers.promote_to_tensor(tl.sum(tmp31, 0)) tmp34 = -1.0 tmp35 = tmp4 == tmp34 tmp36 = tmp35.to(tl.float32) tmp37 = tl.broadcast_to(tmp36, [RBLOCK]) tmp39 = triton_helpers.promote_to_tensor(tl.sum(tmp37, 0)) tmp40 = 0.0001 tmp41 = tmp33 + tmp40 tmp42 = tmp16 / tmp41 tmp43 = tmp39 + tmp40 tmp44 = tmp28 / tmp43 tmp45 = tmp42 + tmp44 tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp4, None) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp45, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((), (), torch.float32) buf6 = buf2 del buf2 get_raw_stream(0) triton_per_fused__to_copy_add_clamp_div_eq_index_put_lift_fresh_mul_ne_pow_rsub_sum_0[ grid(1)](buf6, arg0_1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf6, class BCLNew(nn.Module): """ batch-balanced contrastive loss no-change,1 change,-1 """ def __init__(self, margin=2.0): super(BCLNew, self).__init__() self.margin = margin def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
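One detail worth flagging: the eager forward writes to label in place, and the compiled call mirrors it by passing arg0_1 as both an input (in_ptr0) and an output (out_ptr0) of the kernel, so the caller's tensor is overwritten. A tiny demonstration of the eager side effect:

import torch

label = torch.full((2, 2), 255.0)
label[label == 255] = 1
print(label)   # all ones: the caller's tensor has been mutated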
cuicaihao/STANet
BCL
false
15,089
[ "BSD-2-Clause" ]
220
4c644e2a65bc9516f1d97b29b12ca864638c0c7e
https://github.com/cuicaihao/STANet/tree/4c644e2a65bc9516f1d97b29b12ca864638c0c7e
MultConst
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/5l/c5lja42ucynhzaul7xv7gbc5hgzxyccxj7u735j75mxw7o725pey.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 255), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 255.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 
4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class MultConst(nn.Module):
    """Multiply the input by the constant 255 (e.g. mapping normalized
    [0, 1] images back to 8-bit intensity range)."""

    def forward(self, input):
        return 255 * input


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 255.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class MultConstNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
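A hedged sketch of how the fused wrapper above could be checked against the eager module; call() pins execution to CUDA device 0, so a GPU is required. Both classes above are assumed to be in scope.

import torch

# Sketch: the Triton-backed wrapper should agree with eager 255 * x.
if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = MultConst()(x)
    fused = MultConstNew()(x)
    assert torch.allclose(eager, fused)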
czczup/URST
MultConst
false
15,090
[ "Apache-2.0" ]
119
000ec9f7728f12ffad989ec1d07b1dd579514133
https://github.com/czczup/URST/tree/000ec9f7728f12ffad989ec1d07b1dd579514133
FIN2dCyclic
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/vx/cvx2mkof7lntpjrpc65myom6nddnwk6dg2t67qoqxszlsadf3yea.py # Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten._native_batch_norm_legit] # Source node to ATen node mapping: # instance_norm => add_2, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) triton_per_fused__native_batch_norm_legit_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def 
triton_per_fused__native_batch_norm_legit_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp21, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/gi/cgipapy5od54hlhyzela4goqtovvzza3cjq4ym4f4fx6bnn6ibir.py # Topologically Sorted Source Nodes: [mul_2, add_2], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # add_2 => add_3 # mul_2 => mul_3 # Graph fragment: # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_3), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %unsqueeze_5), kwargs = {}) triton_poi_fused_add_mul_1 = async_compile.triton('triton_poi_fused_add_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x4 = xindex % 256 
x5 = (xindex // 16) % 16 x1 = (xindex // 16) % 4 x7 = (xindex // 64) x8 = xindex tmp0 = tl.load(in_ptr0 + (x4), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x5), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + (x7), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr7 + (x7), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp7 = tmp5 * tmp6 tmp9 = tmp7 + tmp8 tmp10 = tmp4 * tmp9 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tmp16 = tmp10 + tmp15 tl.store(out_ptr0 + (x8), tmp16, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf3 = reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten._native_batch_norm_legit] stream0 = get_raw_stream(0) triton_per_fused__native_batch_norm_legit_0.run(buf3, primals_7, buf0, 16, 16, grid=grid(16), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4, 4, 4, 4, 4), (4096, 1024, 256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_2, add_2], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_1.run(primals_7, buf0, buf3, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, buf4, 16384, grid=grid(16384), stream=stream0) del primals_1 del primals_3 del primals_4 del primals_6 return (buf4, primals_2, primals_5, primals_7, buf0, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
import torch.nn as nn


class FIN2dCyclic(nn.Module):
    """Instance normalization whose affine parameters are modulated by a
    cyclic conditioning signal: gamma = a_gamma * cos + b_gamma and
    beta = a_beta * sin + b_beta, applied per channel."""

    def __init__(self, dim):
        super().__init__()
        self.instance_norm = nn.InstanceNorm2d(dim, affine=False)
        self.a_gamma = nn.Parameter(torch.zeros(dim))
        self.b_gamma = nn.Parameter(torch.ones(dim))
        self.a_beta = nn.Parameter(torch.zeros(dim))
        self.b_beta = nn.Parameter(torch.zeros(dim))

    def forward(self, x, cos, sin):
        gamma = self.a_gamma * cos.unsqueeze(-1) + self.b_gamma
        beta = self.a_beta * sin.unsqueeze(-1) + self.b_beta
        return self.instance_norm(x) * gamma.unsqueeze(-1).unsqueeze(-1
            ) + beta.unsqueeze(-1).unsqueeze(-1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dim': 4}]
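Note that get_inputs() above feeds 4-D cos/sin tensors, which is why the compiled output buffer below is 7-D: gamma and beta pick up extra broadcast dimensions. The sketch below shows the presumably intended use with one conditioning scalar per sample; the (B,)-shaped phase is an assumption inferred from the broadcasting, not taken from the record.

import torch

# Hypothetical usage: cos/sin of shape (B,) give gamma/beta of shape
# (B, C, 1, 1), so the output keeps the (B, C, H, W) input shape.
fin = FIN2dCyclic(dim=4)
x = torch.rand(4, 4, 4, 4)
phi = torch.rand(4) * 2 * torch.pi
out = fin(x, torch.cos(phi), torch.sin(phi))
print(out.shape)  # torch.Size([4, 4, 4, 4])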
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__native_batch_norm_legit_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x4 = xindex % 256 x5 = xindex // 16 % 16 x1 = xindex // 16 % 4 x7 = xindex // 64 x8 = xindex tmp0 = tl.load(in_ptr0 + x4, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x5, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x5, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + x7, None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr7 + x7, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp7 = tmp5 * tmp6 tmp9 = tmp7 + tmp8 tmp10 = tmp4 * tmp9 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tmp16 = tmp10 + tmp15 tl.store(out_ptr0 + x8, tmp16, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf3 = reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf1 get_raw_stream(0) triton_per_fused__native_batch_norm_legit_0[grid(16)](buf3, primals_7, 
buf0, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4, 4, 4, 4), (4096, 1024, 256, 64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_1[grid(16384)](primals_7, buf0, buf3, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, buf4, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_3 del primals_4 del primals_6 return buf4, primals_2, primals_5, primals_7, buf0, buf3 class FIN2dCyclicNew(nn.Module): def __init__(self, dim): super().__init__() self.instance_norm = nn.InstanceNorm2d(dim, affine=False) self.a_gamma = nn.Parameter(torch.zeros(dim)) self.b_gamma = nn.Parameter(torch.ones(dim)) self.a_beta = nn.Parameter(torch.zeros(dim)) self.b_beta = nn.Parameter(torch.zeros(dim)) def forward(self, input_0, input_1, input_2): primals_1 = self.a_gamma primals_3 = self.b_gamma primals_4 = self.a_beta primals_6 = self.b_beta primals_2 = input_0 primals_5 = input_1 primals_7 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
cv-rits/CoMoGAN
FIN2dCyclic
false
15,091
[ "Apache-2.0" ]
141
09f2f0f694421e289fcad467ca0b23f52e4da7a4
https://github.com/cv-rits/CoMoGAN/tree/09f2f0f694421e289fcad467ca0b23f52e4da7a4
BCELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/f6/cf6msygnufcklcpvjj2djhdclyyqh6bvy7v6bxcvwogb4rkzv4nj.py # Topologically Sorted Source Nodes: [binary_cross_entropy], Original ATen: [aten.binary_cross_entropy] # Source node to ATen node mapping: # binary_cross_entropy => full_default, full_default_1, log, log1p, maximum, maximum_1, mean, mul, mul_1, neg, sub, sub_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, 1), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %maximum), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg0_1,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %maximum_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_1,), kwargs = {}) triton_per_fused_binary_cross_entropy_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_binary_cross_entropy_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = -tmp3 tmp5 = libdevice.log1p(tmp4) tmp6 = -100.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp2 * tmp7 tmp9 = tl_math.log(tmp3) tmp10 = triton_helpers.maximum(tmp9, tmp6) tmp11 = tmp0 * tmp10 tmp12 = tmp8 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [binary_cross_entropy], Original ATen: [aten.binary_cross_entropy] stream0 = get_raw_stream(0) triton_per_fused_binary_cross_entropy_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils


def binary_cross_entropy(inputs, target, weight=None, reduction='mean',
        smooth_eps=None, from_logits=False):
    """Binary cross entropy loss, with support for label smoothing
    https://arxiv.org/abs/1512.00567"""
    smooth_eps = smooth_eps or 0
    if smooth_eps > 0:
        # Smooth out-of-place so the caller's float target is not mutated
        # (the original used in-place add_/div_ on the aliased tensor).
        target = target.float().add(smooth_eps).div(2.0)
    if from_logits:
        return F.binary_cross_entropy_with_logits(inputs, target, weight=
            weight, reduction=reduction)
    else:
        return F.binary_cross_entropy(inputs, target, weight=weight,
            reduction=reduction)


class BCELoss(nn.BCELoss):

    def __init__(self, weight=None, size_average=None, reduce=None,
            reduction='mean', smooth_eps=None, from_logits=False):
        super(BCELoss, self).__init__(weight, size_average, reduce, reduction)
        self.smooth_eps = smooth_eps
        self.from_logits = from_logits

    def forward(self, input, target):
        return binary_cross_entropy(input, target, weight=self.weight,
            reduction=self.reduction, smooth_eps=self.smooth_eps,
            from_logits=self.from_logits)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
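A short sketch of the three configurations above: the default path (the one the fused kernel below corresponds to), label smoothing, and from_logits. Assumes the BCELoss class above is in scope.

import torch

# Default path: plain BCE on probabilities in (0, 1).
inp, tgt = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
plain = BCELoss()(inp, tgt)

# Smoothing path: targets become (t + eps) / 2 before the loss.
smoothed = BCELoss(smooth_eps=0.1)(inp, tgt)

# Logits path: inputs may be unbounded; the sigmoid is fused into
# binary_cross_entropy_with_logits.
logits = torch.randn(4, 4, 4, 4)
from_logits = BCELoss(from_logits=True)(logits, tgt)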
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F import torch.utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = -tmp3 tmp5 = libdevice.log1p(tmp4) tmp6 = -100.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp2 * tmp7 tmp9 = tl_math.log(tmp3) tmp10 = triton_helpers.maximum(tmp9, tmp6) tmp11 = tmp0 * tmp10 tmp12 = tmp8 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def binary_cross_entropy(inputs, target, weight=None, reduction='mean', smooth_eps=None, from_logits=False): """cross entropy loss, with support for label smoothing https://arxiv.org/abs/1512.00567""" smooth_eps = smooth_eps or 0 if smooth_eps > 0: target = target.float() target.add_(smooth_eps).div_(2.0) if from_logits: return F.binary_cross_entropy_with_logits(inputs, target, weight= weight, reduction=reduction) else: return F.binary_cross_entropy(inputs, target, weight=weight, reduction=reduction) class BCELossNew(nn.BCELoss): def __init__(self, weight=None, size_average=None, reduce=None, reduction='mean', smooth_eps=None, from_logits=False): super(BCELossNew, self).__init__(weight, size_average, reduce, reduction) self.smooth_eps = smooth_eps self.from_logits = from_logits def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
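A hedged equivalence check between the eager and compiled paths for the default configuration (no smoothing, from_logits=False); both classes above are assumed to be in scope, and call() requires CUDA.

import torch

# Sketch: the fused kernel sums all 256 elements and divides by 256,
# matching reduction='mean' in the eager path.
if torch.cuda.is_available():
    inp = torch.rand(4, 4, 4, 4, device='cuda')
    tgt = torch.rand(4, 4, 4, 4, device='cuda')
    eager = BCELoss()(inp, tgt)
    fused = BCELossNew()(inp, tgt)
    assert torch.allclose(eager, fused, atol=1e-6)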
cwlacewe/SNAS-Series
BCELoss
false
15,092
[ "MIT" ]
133
92ac8031f718235aecaefb9967851f8f355dbca0
https://github.com/cwlacewe/SNAS-Series/tree/92ac8031f718235aecaefb9967851f8f355dbca0
GramMatrix
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/em/ceme7dtfw5rmqppp5ffl7f3bkktbsz45ls4n5npdu7izsivsrkbh.py # Topologically Sorted Source Nodes: [gram], Original ATen: [aten.div] # Source node to ATen node mapping: # gram => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm, 64), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 0.015625 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 
4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(arg0_1, (4, 16, 4), (64, 1, 16), 0), out=buf0) del arg0_1 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [gram], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(buf1, 64, grid=grid(64), stream=stream0) return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class GramMatrix(nn.Module):
    """Batched Gram matrix of feature maps: G = F F^T / (ch * h * w),
    where F is the (b, ch, h * w) flattening of the input."""

    def forward(self, y):
        b, ch, h, w = y.size()
        features = y.view(b, ch, w * h)
        features_t = features.transpose(1, 2)
        gram = features.bmm(features_t) / (ch * h * w)
        return gram


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
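A small usage sketch: for a (b, ch, h, w) input the module returns a (b, ch, ch) batch of Gram matrices normalized by ch * h * w; each one is symmetric by construction. Assumes the GramMatrix class above is in scope.

import torch

# Gram matrices are symmetric per batch element: G = F F^T / (ch*h*w).
gm = GramMatrix()
y = torch.rand(2, 8, 16, 16)
g = gm(y)
print(g.shape)  # torch.Size([2, 8, 8])
assert torch.allclose(g, g.transpose(1, 2))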
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 0.015625 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(arg0_1, (4, 16, 4), (64, 1, 16), 0), out=buf0) del arg0_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_div_0[grid(64)](buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf1, class GramMatrixNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
czczup/URST
GramMatrix
false
15,093
[ "Apache-2.0" ]
119
000ec9f7728f12ffad989ec1d07b1dd579514133
https://github.com/czczup/URST/tree/000ec9f7728f12ffad989ec1d07b1dd579514133
MetaAconC
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/s4/cs4unwn7tzvk4mxiocfpzxeruj4qbvvcfop5wxj2b5hnk2v2blmx.py # Topologically Sorted Source Nodes: [mean, y], Original ATen: [aten.mean] # Source node to ATen node mapping: # mean => mean # y => mean_1 # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [2], True), kwargs = {}) # %mean_1 : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%mean, [3], True), kwargs = {}) triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 + (16*x0)), 
xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = tmp15 / tmp7 tmp17 = tmp8 + tmp16 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp24 = tmp22 + tmp23 tmp25 = tmp24 / tmp7 tmp26 = tmp17 + tmp25 tmp29 = tmp27 + tmp28 tmp31 = tmp29 + tmp30 tmp33 = tmp31 + tmp32 tmp34 = tmp33 / tmp7 tmp35 = tmp26 + tmp34 tmp36 = tmp35 / tmp7 tl.store(out_ptr0 + (x0), tmp36, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/sq/csq4d5ywt2hw5pq3udelncicbksmbzyzkjeogeutctaorzizalid.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%mean_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/k2/ck2mamkqpmuzem4n3p4ij6fmfpy2bcbblg6sx6wwslgqwuqq5ifh.py # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/52/c524v43xmx7ukrxeaoczfzadqo7vf445crtzjs6cjrmx5jmj4o7d.py # Topologically Sorted Source Nodes: [sub], Original ATen: [aten.sub] # Source node to ATen node mapping: # sub => sub # Graph fragment: # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_6, %primals_7), kwargs = {}) triton_poi_fused_sub_3 = async_compile.triton('triton_poi_fused_sub_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sub_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = tmp0 - tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ti/ctihundrm7kheb2uxwglao7l3j6bknjcvbjqqdt73ofncyjrwleb.py # Topologically Sorted Source Nodes: [beta, dpx, mul_1, sigmoid_1, mul_2, mul_3, add], Original ATen: [aten.sigmoid, aten.mul, aten.add] # Source node to ATen node mapping: # add => add # beta => sigmoid # dpx => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # sigmoid_1 => sigmoid_1 # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %mul), kwargs = {}) # %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul_1,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %sigmoid_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_7, %primals_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) triton_poi_fused_add_mul_sigmoid_4 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 
'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 4 x3 = xindex x4 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x3), xmask) tmp3 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = tmp4 * tmp2 tmp6 = tl.sigmoid(tmp5) tmp7 = tmp2 * tmp6 tmp9 = tmp8 * tmp1 tmp10 = tmp7 + tmp9 tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (16, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (16, ), (1, )) assert_size_stride(primals_4, (4, 16, 1, 1), (16, 1, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, y], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_poi_fused_mean_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 16, 1, 1), (16, 1, 1, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 1, 1), (4, 1, 1, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf4, primals_5, 16, grid=grid(16), stream=stream0) del primals_5 buf5 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [sub], Original ATen: [aten.sub] triton_poi_fused_sub_3.run(primals_6, primals_7, buf5, 4, 
grid=grid(4), stream=stream0) del primals_6 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [beta, dpx, mul_1, sigmoid_1, mul_2, mul_3, add], Original ATen: [aten.sigmoid, aten.mul, aten.add] triton_poi_fused_add_mul_sigmoid_4.run(buf5, primals_1, buf4, primals_7, buf6, 256, grid=grid(256), stream=stream0) del primals_7 return (buf6, primals_1, primals_2, primals_4, buf0, buf2, buf4, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class MetaAconC(nn.Module): """ ACON activation (activate or not). MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>. """ def __init__(self, c1, k=1, s=1, r=16): super().__init__() c2 = max(r, c1 // r) self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True) self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True) def forward(self, x): y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True) beta = torch.sigmoid(self.fc2(self.fc1(y))) dpx = (self.p1 - self.p2) * x return dpx * torch.sigmoid(beta * dpx) + self.p2 * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'c1': 4}]
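The docstring above states the ACON-C identity in closed form, so it can be checked numerically against the eager module. Below is a minimal sketch of that check, assuming only torch and the MetaAconC class above are in scope (CPU is sufficient, since no compiled path is involved; all names are local to the sketch):

import torch

m = MetaAconC(c1=4)
x = torch.rand(4, 4, 4, 4)
y = x.mean(dim=(2, 3), keepdim=True)   # same pooled statistic as forward()
beta = torch.sigmoid(m.fc2(m.fc1(y)))  # per-channel switching factor
dpx = (m.p1 - m.p2) * x
expected = dpx * torch.sigmoid(beta * dpx) + m.p2 * x
assert torch.allclose(m(x), expected, atol=1e-6)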
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp10 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp30 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = tmp15 / tmp7 tmp17 = tmp8 + tmp16 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp24 = tmp22 + tmp23 tmp25 = tmp24 / tmp7 tmp26 = tmp17 + tmp25 tmp29 = tmp27 + tmp28 tmp31 = tmp29 + tmp30 tmp33 = tmp31 + tmp32 tmp34 = tmp33 / tmp7 tmp35 = tmp26 + tmp34 tmp36 = tmp35 / tmp7 tl.store(out_ptr0 + x0, tmp36, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_sub_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 - tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x3 = xindex x4 = xindex // 16 tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x3, xmask) tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = tmp4 * tmp2 tmp6 = tl.sigmoid(tmp5) tmp7 = tmp2 * tmp6 tmp9 = tmp8 * tmp1 tmp10 = tmp7 + tmp9 tl.store(out_ptr0 + x3, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (16, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (4, 16, 1, 1), (16, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 16, 1, 1), (16, 1, 1, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 1, 1), (4, 1, 1, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_2[grid(16)](buf4, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_sub_3[grid(4)](primals_6, primals_7, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_6 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_4[grid(256)](buf5, primals_1, buf4, primals_7, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf6, primals_1, primals_2, primals_4, buf0, buf2, buf4, buf5 class MetaAconCNew(nn.Module): """ ACON activation (activate or not). MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>. 
""" def __init__(self, c1, k=1, s=1, r=16): super().__init__() c2 = max(r, c1 // r) self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True) self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True) def forward(self, input_0): primals_6 = self.p1 primals_7 = self.p2 primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
cuiboyuan/plato
MetaAconC
false
15,094
[ "Apache-2.0" ]
135
260b785cbbf8588c92331d6343211ff72321f90e
https://github.com/cuiboyuan/plato/tree/260b785cbbf8588c92331d6343211ff72321f90e
PreActBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/wo/cwo5hzyj7r5kfs5qkbujhau55erj2h3367t3krgxxma4ysrszby7.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # out => gt, mul, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %primals_1, %mul), kwargs = {}) triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < 
xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6i/c6imvz2t7myq5psnhgdc7rjq6xlnhv5d2i7yxhhnwuyzqng5z6rs.py # Topologically Sorted Source Nodes: [leaky_relu_1], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # leaky_relu_1 => gt_1, mul_1, where_1 # Graph fragment: # %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%squeeze, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 0.2), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %squeeze, %mul_1), kwargs = {}) triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr1 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/y4/cy4ywivrvoulzmyoy5vjymbnro5whqtv6677rwbojlx53jirk7ab.py # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add] # Source node to ATen node mapping: # out_3 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_1, %primals_1), kwargs = {}) triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, 
TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_3, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 4, 4, 4), (0, 64, 16, 4, 1), 0), primals_2, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [leaky_relu_1], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf1, buf2, buf3, 256, grid=grid(256), stream=stream0) del buf1 # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (1, 4, 4, 4, 4), (0, 64, 16, 4, 1), 0), primals_3, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf4, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add] triton_poi_fused_add_2.run(buf5, primals_1, 256, grid=grid(256), stream=stream0) del primals_1 return (buf5, primals_2, primals_3, reinterpret_tensor(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf2, reinterpret_tensor(buf3, (1, 4, 
4, 4, 4), (256, 64, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class PreActBlock(nn.Module): """Pre-activation version of the BasicBlock.""" expansion = 1 def __init__(self, in_planes, planes, num_group=4, stride=1, bias=False): super(PreActBlock, self).__init__() self.conv1 = nn.Conv3d(in_planes, planes, kernel_size=3, stride= stride, padding=1, bias=bias) self.conv2 = nn.Conv3d(planes, planes, kernel_size=3, stride=1, padding=1, bias=bias) if stride != 1 or in_planes != self.expansion * planes: self.shortcut = nn.Sequential(nn.Conv3d(in_planes, self. expansion * planes, kernel_size=1, stride=stride, bias=bias)) def forward(self, x): out = F.leaky_relu(x, negative_slope=0.2) shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x out = self.conv1(out) out = self.conv2(F.leaky_relu(out, negative_slope=0.2)) out += shortcut return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_planes': 4, 'planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr1 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_3, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 4, 4, 4), (0, 64, 16, 4, 1), 0), primals_2, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_leaky_relu_1[grid(256)](buf1, buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf1 buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (1, 4, 4, 4, 4), (0, 64, 16, 4, 1), 0), primals_3, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf4, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_add_2[grid(256)](buf5, primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 return buf5, primals_2, primals_3, reinterpret_tensor(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf2, reinterpret_tensor(buf3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0) class PreActBlockNew(nn.Module): """Pre-activation version of the BasicBlock.""" expansion = 1 def __init__(self, in_planes, planes, num_group=4, stride=1, bias=False): super(PreActBlockNew, 
self).__init__() self.conv1 = nn.Conv3d(in_planes, planes, kernel_size=3, stride= stride, padding=1, bias=bias) self.conv2 = nn.Conv3d(planes, planes, kernel_size=3, stride=1, padding=1, bias=bias) if stride != 1 or in_planes != self.expansion * planes: self.shortcut = nn.Sequential(nn.Conv3d(in_planes, self. expansion * planes, kernel_size=1, stride=stride, bias=bias)) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv2.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
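As with the previous record, a parity sketch for this block, under two assumptions: a CUDA device is available, and the PyTorch build accepts unbatched (C, D, H, W) inputs to nn.Conv3d, which is what lets the eager forward run on the 4D tensor from get_inputs() and why the wrapper reinterprets buffers to (1, 4, 4, 4, 4):

import torch

torch.manual_seed(0)
ref = PreActBlock(in_planes=4, planes=4).cuda()
new = PreActBlockNew(in_planes=4, planes=4).cuda()
new.load_state_dict(ref.state_dict())  # conv1/conv2 weights shared; no shortcut branch here
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(ref(x), new(x), atol=1e-5)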
cwmok/LapIRN
PreActBlock
false
15,095
[ "MIT" ]
53
d8f96770a704b1f190955cc26297c7b01a270b0a
https://github.com/cwmok/LapIRN/tree/d8f96770a704b1f190955cc26297c7b01a270b0a
DomainClassifier
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ly/clysrgfou7yzgfvxig5r6lqzp27ay64ivhxjtotr6s67wbodijvt.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x_1 => gt, mul, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.1), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = 
tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.1 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(in_out_ptr0 + (x0), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/dp/cdp4mwv7d2c6zhnuyyhkhpkhom5xgpli6jvwtmxhakk354plhfh7.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x_3 => gt_1, mul_1, where_1 # Graph fragment: # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.1), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {}) triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.1 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(in_out_ptr0 + (x0), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/y6/cy6sgu3hayct2vjeqaosxtvyerhnxyvhtvtjywhrcktkx6clc43q.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x_5 => gt_2, mul_2, where_2 # Graph fragment: # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.1), kwargs = {}) # %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {}) triton_poi_fused_leaky_relu_2 = async_compile.triton('triton_poi_fused_leaky_relu_2', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_2(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.1 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(in_out_ptr0 + (x0), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/tx/ctxipwlvawbf76dnsx57all2lk6tudsfcnjejkddesp3dwhqpzxs.py # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x_7 => gt_3, mul_3, where_3 # Graph fragment: # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.1), kwargs = {}) # %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {}) triton_poi_fused_leaky_relu_3 = async_compile.triton('triton_poi_fused_leaky_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_3(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.1 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(in_out_ptr0 + (x0), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/qy/cqyawun55kj3ztecfdtuqd27b3ep6j7n2mtwqtogrr2nbrc7suda.py # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x_9 => gt_4, mul_4, where_4 # Graph fragment: # %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.1), kwargs = {}) # %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_4, %mul_4), kwargs = {}) triton_poi_fused_leaky_relu_4 = async_compile.triton('triton_poi_fused_leaky_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_4(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.1 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(in_out_ptr0 + (x0), tmp5, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (64, 1024, 4, 4), (16384, 16, 4, 1)) 
assert_size_stride(primals_2, (4, 1024, 64, 64), (4194304, 4096, 64, 1)) assert_size_stride(primals_3, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_4, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_5, (512, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_6, (1024, 512, 4, 4), (8192, 16, 4, 1)) assert_size_stride(primals_7, (1, 1024, 2, 2), (4096, 4, 2, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_0.run(buf1, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_3, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 128, 16, 16), (32768, 256, 16, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf3, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 256, 8, 8), (16384, 64, 8, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_2.run(buf5, 65536, grid=grid(65536), stream=stream0) # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_5, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 512, 4, 4), (8192, 16, 4, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_3.run(buf7, 32768, grid=grid(32768), stream=stream0) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 1024, 2, 2), (4096, 4, 2, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_4.run(buf9, 16384, grid=grid(16384), stream=stream0) # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 1, 1, 1), (1, 1, 1, 1)) return (buf10, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, buf1, buf3, buf5, buf7, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 1024, 4, 4), 
(16384, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1024, 64, 64), (4194304, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((256, 128, 4, 4), (2048, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((512, 256, 4, 4), (4096, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1024, 512, 4, 4), (8192, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 1024, 2, 2), (4096, 4, 2, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.parallel import torch.optim import torch.nn as nn class DomainClassifier(nn.Module): def __init__(self, input_dim=1024, ndf=64, with_bias=False): super(DomainClassifier, self).__init__() self.conv1 = nn.Conv2d(input_dim, ndf, kernel_size=4, stride=2, padding=1, bias=with_bias) self.conv2 = nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1, bias=with_bias) self.conv3 = nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1, bias=with_bias) self.conv4 = nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1, bias=with_bias) self.conv5 = nn.Conv2d(ndf * 8, ndf * 16, kernel_size=4, stride=2, padding=1, bias=with_bias) self.conv6 = nn.Conv2d(ndf * 16, 1, kernel_size=2, stride=1, bias= with_bias) self.leaky_relu = nn.LeakyReLU(negative_slope=0.1, inplace=True) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight, mean=0, std=0.001) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def forward(self, x): x = self.conv1(x) x = self.leaky_relu(x) x = self.conv2(x) x = self.leaky_relu(x) x = self.conv3(x) x = self.leaky_relu(x) x = self.conv4(x) x = self.leaky_relu(x) x = self.conv5(x) x = self.leaky_relu(x) x = self.conv6(x) return x def get_inputs(): return [torch.rand([4, 1024, 64, 64])] def get_init_inputs(): return [[], {}]
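The module above is a plain discriminator stack that halves the spatial resolution five times before a final kernel-2 convolution. A short shape walk-through on CPU, using the same 64x64 input that get_inputs() produces (names are local to the sketch):

import torch

clf = DomainClassifier()  # input_dim=1024, ndf=64
x = torch.rand(4, 1024, 64, 64)
out = clf(x)
# five stride-2, kernel-4, pad-1 convs: 64 -> 32 -> 16 -> 8 -> 4 -> 2,
# then the final kernel-2 conv collapses the 2x2 map to one logit per sample
assert out.shape == (4, 1, 1, 1)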
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.parallel import torch.optim import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_leaky_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.1 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(in_out_ptr0 + x0, tmp5, None) @triton.jit def triton_poi_fused_leaky_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.1 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(in_out_ptr0 + x0, tmp5, None) @triton.jit def triton_poi_fused_leaky_relu_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.1 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(in_out_ptr0 + x0, tmp5, None) @triton.jit def triton_poi_fused_leaky_relu_3(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.1 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(in_out_ptr0 + x0, tmp5, None) @triton.jit def triton_poi_fused_leaky_relu_4(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.1 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(in_out_ptr0 + x0, tmp5, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (64, 1024, 4, 4), (16384, 16, 4, 1)) assert_size_stride(primals_2, (4, 1024, 64, 64), (4194304, 4096, 64, 1)) assert_size_stride(primals_3, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_4, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_5, (512, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_6, (1024, 512, 4, 4), (8192, 16, 4, 1)) assert_size_stride(primals_7, (1, 1024, 2, 2), (4096, 4, 2, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(262144)](buf1, 262144, XBLOCK= 512, num_warps=8, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_3, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 128, 16, 16), (32768, 256, 16, 1)) buf3 = buf2 del buf2 
triton_poi_fused_leaky_relu_1[grid(131072)](buf3, 131072, XBLOCK= 1024, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 256, 8, 8), (16384, 64, 8, 1)) buf5 = buf4 del buf4 triton_poi_fused_leaky_relu_2[grid(65536)](buf5, 65536, XBLOCK=256, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_5, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 512, 4, 4), (8192, 16, 4, 1)) buf7 = buf6 del buf6 triton_poi_fused_leaky_relu_3[grid(32768)](buf7, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf7, primals_6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 1024, 2, 2), (4096, 4, 2, 1)) buf9 = buf8 del buf8 triton_poi_fused_leaky_relu_4[grid(16384)](buf9, 16384, XBLOCK=256, num_warps=4, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 1, 1, 1), (1, 1, 1, 1)) return (buf10, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, buf1, buf3, buf5, buf7, buf9) class DomainClassifierNew(nn.Module): def __init__(self, input_dim=1024, ndf=64, with_bias=False): super(DomainClassifierNew, self).__init__() self.conv1 = nn.Conv2d(input_dim, ndf, kernel_size=4, stride=2, padding=1, bias=with_bias) self.conv2 = nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1, bias=with_bias) self.conv3 = nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1, bias=with_bias) self.conv4 = nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1, bias=with_bias) self.conv5 = nn.Conv2d(ndf * 8, ndf * 16, kernel_size=4, stride=2, padding=1, bias=with_bias) self.conv6 = nn.Conv2d(ndf * 16, 1, kernel_size=2, stride=1, bias= with_bias) self.leaky_relu = nn.LeakyReLU(negative_slope=0.1, inplace=True) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight, mean=0, std=0.001) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def forward(self, input_0): primals_1 = self.conv1.weight primals_3 = self.conv2.weight primals_4 = self.conv3.weight primals_5 = self.conv4.weight primals_6 = self.conv5.weight primals_7 = self.conv6.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
chaneyddtt/UDA-Animal-Pose
DomainClassifier
false
15,096
[ "MIT" ]
61
f1ebfda860a2585c60fe86ce1632e910ac97ebc5
https://github.com/chaneyddtt/UDA-Animal-Pose/tree/f1ebfda860a2585c60fe86ce1632e910ac97ebc5
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ud/cud5diybwrcgcrwz72q7ptq4ubvr3sly4tc53m6uwovofwm4pt6u.py # Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.sub] # Source node to ATen node mapping: # outputs => sub # Graph fragment: # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %unsqueeze), kwargs = {}) triton_poi_fused_sub_0 = async_compile.triton('triton_poi_fused_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), 
xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ac/cacdd5t2o6o7ejyngvjelhqueeqblsuofnb67t7fjo43to2be7do.py # Topologically Sorted Source Nodes: [add, std, outputs_1, outputs_2, outputs_3], Original ATen: [aten.add, aten.pow, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # outputs_1 => div # outputs_2 => mul # outputs_3 => add_1 # std => pow_2 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_1, 1e-12), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %pow_2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_1), kwargs = {}) triton_poi_fused_add_div_mul_pow_1 = async_compile.triton('triton_poi_fused_add_div_mul_pow_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_pow_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = 
tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = 1e-12 tmp15 = tmp13 + tmp14 tmp16 = tmp15 * tmp15 tmp17 = tmp0 / tmp16 tmp19 = tmp17 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + (x2), tmp21, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_sub_0.run(primals_3, buf0, 256, grid=grid(256), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, std, outputs_1, outputs_2, outputs_3], Original ATen: [aten.add, aten.pow, aten.div, aten.mul] triton_poi_fused_add_div_mul_pow_1.run(buf0, primals_2, primals_1, buf1, 256, grid=grid(256), stream=stream0) del buf0 del primals_1 del primals_2 return (buf1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch.nn.parameter import Parameter class LayerNorm(nn.Module): def __init__(self, input_dim, cond_dim=0, center=True, scale=True, epsilon=None, conditional=False, hidden_units=None, hidden_activation='linear', hidden_initializer='xaiver', **kwargs): super(LayerNorm, self).__init__() """ input_dim: inputs.shape[-1] cond_dim: cond.shape[-1] """ self.center = center self.scale = scale self.conditional = conditional self.hidden_units = hidden_units self.hidden_initializer = hidden_initializer self.epsilon = epsilon or 1e-12 self.input_dim = input_dim self.cond_dim = cond_dim if self.center: self.beta = Parameter(torch.zeros(input_dim)) if self.scale: self.gamma = Parameter(torch.ones(input_dim)) if self.conditional: if self.hidden_units is not None: self.hidden_dense = nn.Linear(in_features=self.cond_dim, out_features=self.hidden_units, bias=False) if self.center: self.beta_dense = nn.Linear(in_features=self.cond_dim, out_features=input_dim, bias=False) if self.scale: self.gamma_dense = nn.Linear(in_features=self.cond_dim, out_features=input_dim, bias=False) self.initialize_weights() def initialize_weights(self): if self.conditional: if self.hidden_units is not None: if self.hidden_initializer == 'normal': torch.nn.init.normal(self.hidden_dense.weight) elif self.hidden_initializer == 'xavier': torch.nn.init.xavier_uniform_(self.hidden_dense.weight) if self.center: torch.nn.init.constant_(self.beta_dense.weight, 0) if self.scale: torch.nn.init.constant_(self.gamma_dense.weight, 0) def forward(self, inputs, cond=None): """ For conditional Layer Norm, cond must not be None """ if self.conditional: if self.hidden_units is not None: cond = self.hidden_dense(cond) for _ in range(len(inputs.shape) - len(cond.shape)): cond = cond.unsqueeze(1) if self.center: beta = self.beta_dense(cond) + self.beta if self.scale: gamma = self.gamma_dense(cond) + self.gamma else: if self.center: beta = self.beta if self.scale: gamma = self.gamma outputs = inputs if self.center: mean = torch.mean(outputs, dim=-1).unsqueeze(-1) outputs = outputs - mean if self.scale: variance = torch.mean(outputs ** 2, dim=-1).unsqueeze(-1) std = (variance + self.epsilon) ** 2 outputs = outputs / std outputs = outputs * gamma if self.center: outputs = outputs + beta return outputs def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4}]
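A quick smoke test of the eager module above, assuming the default constructor arguments. Note that forward computes std as (variance + epsilon) ** 2 rather than taking a square root, and the fused kernels reproduce exactly that:

layer = LayerNorm(input_dim=4)
x = torch.rand(4, 4, 4, 4)
y = layer(x)
centered = x - x.mean(dim=-1, keepdim=True)
ref = centered / (centered.pow(2).mean(dim=-1, keepdim=True) + 1e-12) ** 2 * layer.gamma + layer.beta
assert torch.allclose(y, ref)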
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_div_mul_pow_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = 1e-12 tmp15 = tmp13 + tmp14 tmp16 = tmp15 * tmp15 tmp17 = tmp0 / tmp16 tmp19 = tmp17 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + x2, tmp21, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(256)](primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mul_pow_1[grid(256)](buf0, primals_2, primals_1, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_1 del primals_2 return buf1, primals_3 class LayerNormNew(nn.Module): def __init__(self, input_dim, cond_dim=0, center=True, scale=True, epsilon=None, conditional=False, hidden_units=None, hidden_activation='linear', hidden_initializer='xaiver', **kwargs): super(LayerNormNew, self).__init__() """ input_dim: inputs.shape[-1] cond_dim: cond.shape[-1] """ self.center = center self.scale = scale self.conditional = conditional self.hidden_units = hidden_units self.hidden_initializer = hidden_initializer self.epsilon = epsilon or 1e-12 self.input_dim = input_dim self.cond_dim = cond_dim if self.center: self.beta = Parameter(torch.zeros(input_dim)) if self.scale: self.gamma = Parameter(torch.ones(input_dim)) if 
self.conditional: if self.hidden_units is not None: self.hidden_dense = nn.Linear(in_features=self.cond_dim, out_features=self.hidden_units, bias=False) if self.center: self.beta_dense = nn.Linear(in_features=self.cond_dim, out_features=input_dim, bias=False) if self.scale: self.gamma_dense = nn.Linear(in_features=self.cond_dim, out_features=input_dim, bias=False) self.initialize_weights() def initialize_weights(self): if self.conditional: if self.hidden_units is not None: if self.hidden_initializer == 'normal': torch.nn.init.normal(self.hidden_dense.weight) elif self.hidden_initializer == 'xavier': torch.nn.init.xavier_uniform_(self.hidden_dense.weight) if self.center: torch.nn.init.constant_(self.beta_dense.weight, 0) if self.scale: torch.nn.init.constant_(self.gamma_dense.weight, 0) def forward(self, input_0): primals_1 = self.beta primals_2 = self.gamma primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
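A hedged parity check between the eager and Triton-backed modules, assuming a CUDA device is available (the call wrapper asserts CUDA-resident tensors with these exact shapes and strides):

x = torch.rand(4, 4, 4, 4, device='cuda')
eager = LayerNorm(input_dim=4).cuda()
fused = LayerNormNew(input_dim=4).cuda()
assert torch.allclose(eager(x), fused(x), atol=1e-6)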
cwxcwx0319/Dictionary
LayerNorm
false
15,097
[ "Apache-2.0" ]
82
55fb9a602a212f9c3a69a318fec31da1d07279df
https://github.com/cwxcwx0319/Dictionary/tree/55fb9a602a212f9c3a69a318fec31da1d07279df
ThumbAdaptiveInstanceNorm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/se/cseopt62aqmfhlprpzwzv7xtkglkcidorjfsfgpj4igghyvojizh.py # Topologically Sorted Source Nodes: [mean_1, sub, var_1, feat_var_1, sqrt_1, normalized_feat, var, mul, mean, add_2], Original ATen: [aten.mean, aten.sub, aten.var, aten.add, aten.sqrt, aten.div, aten.mul] # Source node to ATen node mapping: # add_2 => add_2 # feat_var_1 => add_1 # mean => mean # mean_1 => mean_1 # mul => mul # normalized_feat => div # sqrt_1 => sqrt_1 # sub => sub # var => var # var_1 => var_1 # Graph fragment: # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_6, [2]), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %expand), kwargs = {}) # %var_1 : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%view_4, [2]), kwargs = {correction: 1}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var_1, 1e-05), kwargs = {}) # %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %expand_1), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%view, [2]), kwargs = {correction: 1}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %expand_2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_2, [2]), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %expand_3), kwargs = {}) triton_per_fused_add_div_mean_mul_sqrt_sub_var_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_sqrt_sub_var_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_sqrt_sub_var_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 8, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_mul_sqrt_sub_var_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp26 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp4 / tmp19 tmp21 = 15.0 tmp22 = tmp18 / tmp21 tmp23 = 1e-05 tmp24 = tmp22 + tmp23 tmp25 = libdevice.sqrt(tmp24) tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tmp29 = tl.where(xmask, tmp27, 0) tmp30 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp32 = tl.where(xmask, tmp30, 0) tmp33 = tl.sum(tmp32, 1)[:, None] tmp34 = tmp33 / tmp11 tmp35 = tmp27 - tmp34 tmp36 = tmp35 * tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = tl.where(xmask, tmp37, 0) tmp40 = tl.sum(tmp39, 1)[:, None] tmp42 = tl.sum(tmp29, 1)[:, None] tmp43 = tmp0 - tmp20 tmp44 = tmp43 / tmp25 tmp45 = tmp40 / tmp21 tmp46 = tmp45 + tmp23 tmp47 = libdevice.sqrt(tmp46) tmp48 = tmp44 * tmp47 tmp49 = tmp42 / tmp19 tmp50 = tmp48 + tmp49 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp20, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + (x0), tmp25, xmask) tl.store(out_ptr2 + (r1 + (16*x0)), tmp50, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0; del buf0 # reuse buf5 = buf3; del buf3 # reuse buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically 
Sorted Source Nodes: [mean_1, sub, var_1, feat_var_1, sqrt_1, normalized_feat, var, mul, mean, add_2], Original ATen: [aten.mean, aten.sub, aten.var, aten.add, aten.sqrt, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_add_div_mean_mul_sqrt_sub_var_0.run(buf1, buf5, arg0_1, arg1_1, buf10, 16, 16, grid=grid(16), stream=stream0) del arg0_1 del arg1_1 return (buf10, reinterpret_tensor(buf5, (4, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
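The single persistent-reduction kernel fuses the per-channel mean/std of both content and style features with the AdaIN combine. An eager sketch of the same math, assuming eps=1e-05 and the unbiased variance (the /15.0 divisor in the kernel):

import torch

def mean_std(feat, eps=1e-05):
    n, c = feat.shape[:2]
    var = feat.view(n, c, -1).var(dim=2) + eps    # unbiased over 16 spatial elements
    return feat.view(n, c, -1).mean(dim=2).view(n, c, 1, 1), var.sqrt().view(n, c, 1, 1)

content, style = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
c_mean, c_std = mean_std(content)
s_mean, s_std = mean_std(style)
out = (content - c_mean) / c_std * s_std + s_mean  # tmp50 in the fused kernel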
import torch import torch.nn as nn class ThumbInstanceNorm(nn.Module): def __init__(self, out_channels=None, affine=True): super(ThumbInstanceNorm, self).__init__() self.thumb_mean = None self.thumb_std = None self.collection = True if affine is True: self.weight = nn.Parameter(torch.ones(size=(1, out_channels, 1, 1), requires_grad=True)) self.bias = nn.Parameter(torch.zeros(size=(1, out_channels, 1, 1), requires_grad=True)) def calc_mean_std(self, feat, eps=1e-05): size = feat.size() assert len(size) == 4 N, C = size[:2] feat_var = feat.view(N, C, -1).var(dim=2) + eps feat_std = feat_var.sqrt().view(N, C, 1, 1) feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1) return feat_mean, feat_std def forward(self, x, thumb=None): if self.training: thumb_mean, thumb_std = self.calc_mean_std(thumb) x = (x - thumb_mean) / thumb_std * self.weight + self.bias thumb = (thumb - thumb_mean) / thumb_std * self.weight + self.bias return x, thumb else: if self.collection: thumb_mean, thumb_std = self.calc_mean_std(x) self.thumb_mean = thumb_mean self.thumb_std = thumb_std x = (x - self.thumb_mean ) / self.thumb_std * self.weight + self.bias return x class ThumbAdaptiveInstanceNorm(ThumbInstanceNorm): def __init__(self): super(ThumbAdaptiveInstanceNorm, self).__init__(affine=False) def forward(self, content_feat, style_feat): assert content_feat.size()[:2] == style_feat.size()[:2] size = content_feat.size() style_mean, style_std = self.calc_mean_std(style_feat) if self.collection is True: thumb_mean, thumb_std = self.calc_mean_std(content_feat) self.thumb_mean = thumb_mean self.thumb_std = thumb_std normalized_feat = (content_feat - self.thumb_mean.expand(size) ) / self.thumb_std.expand(size) return normalized_feat * style_std.expand(size) + style_mean.expand( size) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
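A short usage sketch for the eager module above; because collection defaults to True, the first call caches the content statistics in thumb_mean / thumb_std:

adain = ThumbAdaptiveInstanceNorm()
content, style = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
stylized = adain(content, style)
assert stylized.shape == content.shape
assert adain.thumb_mean.shape == (4, 4, 1, 1)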
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_mean_mul_sqrt_sub_var_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp26 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp4 / tmp19 tmp21 = 15.0 tmp22 = tmp18 / tmp21 tmp23 = 1e-05 tmp24 = tmp22 + tmp23 tmp25 = libdevice.sqrt(tmp24) tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK]) tmp29 = tl.where(xmask, tmp27, 0) tmp30 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp32 = tl.where(xmask, tmp30, 0) tmp33 = tl.sum(tmp32, 1)[:, None] tmp34 = tmp33 / tmp11 tmp35 = tmp27 - tmp34 tmp36 = tmp35 * tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = tl.where(xmask, tmp37, 0) tmp40 = tl.sum(tmp39, 1)[:, None] tmp42 = tl.sum(tmp29, 1)[:, None] tmp43 = tmp0 - tmp20 tmp44 = tmp43 / tmp25 tmp45 = tmp40 / tmp21 tmp46 = tmp45 + tmp23 tmp47 = libdevice.sqrt(tmp46) tmp48 = tmp44 * tmp47 tmp49 = tmp42 / tmp19 tmp50 = tmp48 + tmp49 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp20, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp25, xmask) tl.store(out_ptr2 + (r1 + 16 * x0), tmp50, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0 del buf0 buf5 = buf3 del buf3 buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_div_mean_mul_sqrt_sub_var_0[grid(16)](buf1, buf5, arg0_1, arg1_1, buf10, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf10, reinterpret_tensor(buf5, (4, 4, 1, 1), (4, 1, 1, 1), 0 ), reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0) class ThumbInstanceNorm(nn.Module): def __init__(self, out_channels=None, affine=True): super(ThumbInstanceNorm, self).__init__() self.thumb_mean = None self.thumb_std = None self.collection = True if affine is True: self.weight = nn.Parameter(torch.ones(size=(1, out_channels, 1, 1), requires_grad=True)) self.bias = nn.Parameter(torch.zeros(size=(1, out_channels, 1, 1), requires_grad=True)) def calc_mean_std(self, feat, 
eps=1e-05): size = feat.size() assert len(size) == 4 N, C = size[:2] feat_var = feat.view(N, C, -1).var(dim=2) + eps feat_std = feat_var.sqrt().view(N, C, 1, 1) feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1) return feat_mean, feat_std def forward(self, x, thumb=None): if self.training: thumb_mean, thumb_std = self.calc_mean_std(thumb) x = (x - thumb_mean) / thumb_std * self.weight + self.bias thumb = (thumb - thumb_mean) / thumb_std * self.weight + self.bias return x, thumb else: if self.collection: thumb_mean, thumb_std = self.calc_mean_std(x) self.thumb_mean = thumb_mean self.thumb_std = thumb_std x = (x - self.thumb_mean ) / self.thumb_std * self.weight + self.bias return x class ThumbAdaptiveInstanceNormNew(ThumbInstanceNorm): def __init__(self): super(ThumbAdaptiveInstanceNormNew, self).__init__(affine=False) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
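A hedged parity check between the eager and fused modules, assuming CUDA; neither module holds parameters, so no state needs to be copied:

content = torch.rand(4, 4, 4, 4, device='cuda')
style = torch.rand(4, 4, 4, 4, device='cuda')
eager = ThumbAdaptiveInstanceNorm()
fused = ThumbAdaptiveInstanceNormNew()
assert torch.allclose(eager(content, style), fused(content, style), atol=1e-5)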
czczup/URST
ThumbAdaptiveInstanceNorm
false
15,099
[ "Apache-2.0" ]
119
000ec9f7728f12ffad989ec1d07b1dd579514133
https://github.com/czczup/URST/tree/000ec9f7728f12ffad989ec1d07b1dd579514133
resnet_block
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/p6/cp6er34jahf2hwcdedfbsxmfl2za3bldykz6q6mcvtfzgekia4pv.py # Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten._native_batch_norm_legit, aten.leaky_relu] # Source node to ATen node mapping: # output => add, rsqrt, var_mean # output_1 => gt, mul_1, where # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.02), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul_1), kwargs = {}) triton_per_fused__native_batch_norm_legit_leaky_relu_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_leaky_relu_0(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp24 = 0.0 tmp25 = tmp23 > tmp24 tmp26 = 0.02 tmp27 = tmp23 * tmp26 tmp28 = tl.where(tmp25, tmp23, tmp27) tl.store(out_ptr2 + (r1 + (16*x0)), tmp28, xmask) tl.store(out_ptr3 + (x0), tmp22, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3l/c3lgn4c2gqtivmlbud5aer6bxu4l5tpqee4zohammomjdg5cbwxy.py # Topologically Sorted Source Nodes: [output_2, output_3, output_4], Original ATen: [aten._native_batch_norm_legit, aten.add, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # output_2 => add_1, rsqrt_1, var_mean_1 # output_3 => add_2 # output_4 => gt_1, mul_3, where_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_5, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_6, %primals_2), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_2, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.02), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %add_2, %mul_3), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_1, 0), kwargs = {}) triton_per_fused__native_batch_norm_legit_add_leaky_relu_leaky_relu_backward_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_leaky_relu_leaky_relu_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import 
libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_leaky_relu_leaky_relu_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_add_leaky_relu_leaky_relu_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 + tmp24 tmp26 = 0.0 tmp27 = tmp25 > tmp26 tmp28 = 0.02 tmp29 = tmp25 * tmp28 tmp30 = tl.where(tmp27, tmp25, tmp29) tmp31 = tmp30 > tmp26 tl.store(out_ptr2 + (r1 + (16*x0)), tmp30, xmask) tl.store(out_ptr3 + (r1 + (16*x0)), tmp31, xmask) tl.store(out_ptr4 + (x0), tmp22, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 
16), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten._native_batch_norm_legit, aten.leaky_relu] stream0 = get_raw_stream(0) triton_per_fused__native_batch_norm_legit_leaky_relu_0.run(buf0, buf1, buf5, buf4, 16, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [output_2, output_3, output_4], Original ATen: [aten._native_batch_norm_legit, aten.add, aten.leaky_relu, aten.leaky_relu_backward] triton_per_fused__native_batch_norm_legit_add_leaky_relu_leaky_relu_backward_1.run(buf6, primals_2, buf7, buf11, buf12, buf10, 16, 16, grid=grid(16), stream=stream0) return (buf11, primals_1, primals_2, primals_3, buf0, reinterpret_tensor(buf4, (16, ), (1, ), 0), buf5, buf6, reinterpret_tensor(buf10, (16, ), (1, ), 0), buf12, reinterpret_tensor(buf7, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
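Each fused kernel performs an un-affine instance norm over the 4x4 spatial plane (biased variance, the /16.0 above) followed by a leaky ReLU with slope 0.02; the second kernel also adds the residual and stores the boolean mask used by the backward pass. An eager sketch of that arithmetic on a stand-in tensor (the convolutions in between are dispatched to extern_kernels.convolution and are omitted here):

import torch
import torch.nn.functional as F

def inorm(t, eps=1e-05):
    mu = t.mean(dim=(2, 3), keepdim=True)
    var = t.var(dim=(2, 3), unbiased=False, keepdim=True)  # biased: the /16.0 in the kernel
    return (t - mu) * (var + eps).rsqrt()

x = torch.rand(4, 4, 4, 4)
h = F.leaky_relu(inorm(x), negative_slope=0.02)            # kernel 0
y = F.leaky_relu(inorm(h) + x, negative_slope=0.02)        # kernel 1: norm + residual + activation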
import torch import torch.nn as nn import torch.nn.functional as F class resnet_block(nn.Module): def __init__(self, dim_in, dim_out): super(resnet_block, self).__init__() self.dim_in = dim_in self.dim_out = dim_out if self.dim_in == self.dim_out: self.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=1, padding=1, bias=False) self.bn_1 = nn.InstanceNorm2d(self.dim_out) self.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1, padding=1, bias=False) self.bn_2 = nn.InstanceNorm2d(self.dim_out) else: self.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=2, padding=1, bias=False) self.bn_1 = nn.InstanceNorm2d(self.dim_out) self.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1, padding=1, bias=False) self.bn_2 = nn.InstanceNorm2d(self.dim_out) self.conv_s = nn.Conv2d(self.dim_in, self.dim_out, 1, stride=2, padding=0, bias=False) self.bn_s = nn.InstanceNorm2d(self.dim_out) def forward(self, input): if self.dim_in == self.dim_out: output = self.bn_1(self.conv_1(input)) output = F.leaky_relu(output, negative_slope=0.02, inplace=True) output = self.bn_2(self.conv_2(output)) output = output + input output = F.leaky_relu(output, negative_slope=0.02, inplace=True) else: output = self.bn_1(self.conv_1(input)) output = F.leaky_relu(output, negative_slope=0.02, inplace=True) output = self.bn_2(self.conv_2(output)) input_ = self.bn_s(self.conv_s(input)) output = output + input_ output = F.leaky_relu(output, negative_slope=0.02, inplace=True) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_in': 4, 'dim_out': 4}]
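A shape-level smoke test for both branches of the eager block above; the stride-2 branch halves the spatial size through conv_1 and the 1x1 shortcut conv_s:

block = resnet_block(dim_in=4, dim_out=4)
x = torch.rand(4, 4, 4, 4)
assert block(x).shape == x.shape            # same-dim branch preserves shape
down = resnet_block(dim_in=4, dim_out=8)
assert down(x).shape == (4, 8, 2, 2)        # stride-2 branch: 4x4 -> 2x2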
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__native_batch_norm_legit_leaky_relu_0(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp24 = 0.0 tmp25 = tmp23 > tmp24 tmp26 = 0.02 tmp27 = tmp23 * tmp26 tmp28 = tl.where(tmp25, tmp23, tmp27) tl.store(out_ptr2 + (r1 + 16 * x0), tmp28, xmask) tl.store(out_ptr3 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_add_leaky_relu_leaky_relu_backward_1( in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 + tmp24 tmp26 = 0.0 tmp27 = tmp25 > tmp26 tmp28 = 0.02 tmp29 = tmp25 * tmp28 tmp30 = tl.where(tmp27, tmp25, tmp29) tmp31 = tmp30 > tmp26 tl.store(out_ptr2 + (r1 + 16 * x0), tmp30, xmask) tl.store(out_ptr3 + (r1 + 16 * x0), tmp31, xmask) tl.store(out_ptr4 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) get_raw_stream(0) triton_per_fused__native_batch_norm_legit_leaky_relu_0[grid(16)](buf0, buf1, buf5, buf4, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) triton_per_fused__native_batch_norm_legit_add_leaky_relu_leaky_relu_backward_1[ grid(16)](buf6, primals_2, buf7, buf11, buf12, buf10, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) return buf11, primals_1, primals_2, primals_3, buf0, reinterpret_tensor( buf4, (16,), (1,), 0), buf5, buf6, reinterpret_tensor(buf10, (16,), (1,), 0), buf12, reinterpret_tensor(buf7, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0) class resnet_blockNew(nn.Module): def __init__(self, dim_in, dim_out): super(resnet_blockNew, self).__init__() self.dim_in = dim_in self.dim_out = dim_out if self.dim_in == self.dim_out: self.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=1, padding=1, bias=False) self.bn_1 = nn.InstanceNorm2d(self.dim_out) self.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1, padding=1, bias=False) self.bn_2 = nn.InstanceNorm2d(self.dim_out) else: self.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=2, padding=1, bias=False) self.bn_1 = nn.InstanceNorm2d(self.dim_out) self.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1, padding=1, bias=False) self.bn_2 = nn.InstanceNorm2d(self.dim_out) self.conv_s = nn.Conv2d(self.dim_in, self.dim_out, 1, stride=2, padding=0, bias=False) self.bn_s = nn.InstanceNorm2d(self.dim_out) def forward(self, input_0): primals_1 = self.conv_1.weight primals_3 = self.conv_2.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
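A hedged parity check between the eager and fused blocks, assuming CUDA; the convolution weights are shared by copying the state dict (the InstanceNorm2d layers carry no parameters):

eager = resnet_block(4, 4).cuda()
fused = resnet_blockNew(4, 4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), fused(x), atol=1e-5)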
czq142857/DECOR-GAN
resnet_block
false
15,102
[ "MIT" ]
55
79c80fc202b8af982989a3e3bb3afe85e606b71f
https://github.com/czq142857/DECOR-GAN/tree/79c80fc202b8af982989a3e3bb3afe85e606b71f
VQVAEQuantize
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/bo/cbo7q7zee63izu352yw3zialbxihegxnicfldmmdq5fg6gujysyb.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, 2), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = (xindex // 64) x2 = xindex tmp0 = tl.load(in_ptr0 + ((16*x1) + (64*(x0 // 16)) + (x0 % 16)), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 * tmp3 
tl.store(out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/37/c37xdudc3uphft7hgfru75ztfuwwdgclqnwfc55yd7m3td7r6gck.py # Topologically Sorted Source Nodes: [pow_1, sum_1, sub, dist, neg, max_1], Original ATen: [aten.pow, aten.sum, aten.sub, aten.add, aten.neg, aten.max] # Source node to ATen node mapping: # dist => add # max_1 => max_1 # neg => neg # pow_1 => pow_1 # sub => sub # sum_1 => sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, %mm), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %permute_2), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%add,), kwargs = {}) # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%neg, 1), kwargs = {}) triton_poi_fused_add_max_neg_pow_sub_sum_1 = async_compile.triton('triton_poi_fused_add_max_neg_pow_sub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_max_neg_pow_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 28, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_max_neg_pow_sub_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + ((64*(x0 // 16)) + (x0 % 16)), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp5 = tl.load(in_ptr0 + (16 + (64*(x0 // 16)) + (x0 % 16)), xmask) tmp6 = tl.load(in_ptr1 + (1)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp11 = tl.load(in_ptr0 + (32 + (64*(x0 // 16)) + (x0 % 16)), xmask) tmp12 = tl.load(in_ptr1 + (2)) tmp13 = tl.broadcast_to(tmp12, [XBLOCK]) tmp17 = tl.load(in_ptr0 + (48 + (64*(x0 // 16)) + (x0 % 16)), xmask) tmp18 = tl.load(in_ptr1 + (3)) tmp19 = tl.broadcast_to(tmp18, [XBLOCK]) tmp23 = tl.load(in_ptr2 + (4*x0), xmask, 
eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (0)) tmp26 = tl.broadcast_to(tmp25, [XBLOCK]) tmp28 = tl.load(in_ptr3 + (1)) tmp29 = tl.broadcast_to(tmp28, [XBLOCK]) tmp32 = tl.load(in_ptr3 + (2)) tmp33 = tl.broadcast_to(tmp32, [XBLOCK]) tmp36 = tl.load(in_ptr3 + (3)) tmp37 = tl.broadcast_to(tmp36, [XBLOCK]) tmp42 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp44 = tl.load(in_ptr3 + (4)) tmp45 = tl.broadcast_to(tmp44, [XBLOCK]) tmp47 = tl.load(in_ptr3 + (5)) tmp48 = tl.broadcast_to(tmp47, [XBLOCK]) tmp51 = tl.load(in_ptr3 + (6)) tmp52 = tl.broadcast_to(tmp51, [XBLOCK]) tmp55 = tl.load(in_ptr3 + (7)) tmp56 = tl.broadcast_to(tmp55, [XBLOCK]) tmp76 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp78 = tl.load(in_ptr3 + (8)) tmp79 = tl.broadcast_to(tmp78, [XBLOCK]) tmp81 = tl.load(in_ptr3 + (9)) tmp82 = tl.broadcast_to(tmp81, [XBLOCK]) tmp85 = tl.load(in_ptr3 + (10)) tmp86 = tl.broadcast_to(tmp85, [XBLOCK]) tmp89 = tl.load(in_ptr3 + (11)) tmp90 = tl.broadcast_to(tmp89, [XBLOCK]) tmp109 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp111 = tl.load(in_ptr3 + (12)) tmp112 = tl.broadcast_to(tmp111, [XBLOCK]) tmp114 = tl.load(in_ptr3 + (13)) tmp115 = tl.broadcast_to(tmp114, [XBLOCK]) tmp118 = tl.load(in_ptr3 + (14)) tmp119 = tl.broadcast_to(tmp118, [XBLOCK]) tmp122 = tl.load(in_ptr3 + (15)) tmp123 = tl.broadcast_to(tmp122, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tmp3 * tmp3 tmp8 = tmp5 + tmp7 tmp9 = tmp8 * tmp8 tmp10 = tmp4 + tmp9 tmp14 = tmp11 + tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp20 = tmp17 + tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp24 = tmp22 - tmp23 tmp27 = tmp26 * tmp26 tmp30 = tmp29 * tmp29 tmp31 = tmp27 + tmp30 tmp34 = tmp33 * tmp33 tmp35 = tmp31 + tmp34 tmp38 = tmp37 * tmp37 tmp39 = tmp35 + tmp38 tmp40 = tmp24 + tmp39 tmp41 = -tmp40 tmp43 = tmp22 - tmp42 tmp46 = tmp45 * tmp45 tmp49 = tmp48 * tmp48 tmp50 = tmp46 + tmp49 tmp53 = tmp52 * tmp52 tmp54 = tmp50 + tmp53 tmp57 = tmp56 * tmp56 tmp58 = tmp54 + tmp57 tmp59 = tmp43 + tmp58 tmp60 = -tmp59 tmp61 = tmp41 > tmp60 tmp62 = tmp41 == tmp60 tmp63 = tmp41 != tmp41 tmp64 = tmp60 != tmp60 tmp65 = tmp63 > tmp64 tmp66 = tmp61 | tmp65 tmp67 = tmp63 & tmp64 tmp68 = tmp62 | tmp67 tmp69 = tl.full([1], 0, tl.int64) tmp70 = tl.full([1], 1, tl.int64) tmp71 = tmp69 < tmp70 tmp72 = tmp68 & tmp71 tmp73 = tmp66 | tmp72 tmp74 = tl.where(tmp73, tmp41, tmp60) tmp75 = tl.where(tmp73, tmp69, tmp70) tmp77 = tmp22 - tmp76 tmp80 = tmp79 * tmp79 tmp83 = tmp82 * tmp82 tmp84 = tmp80 + tmp83 tmp87 = tmp86 * tmp86 tmp88 = tmp84 + tmp87 tmp91 = tmp90 * tmp90 tmp92 = tmp88 + tmp91 tmp93 = tmp77 + tmp92 tmp94 = -tmp93 tmp95 = tmp74 > tmp94 tmp96 = tmp74 == tmp94 tmp97 = tmp74 != tmp74 tmp98 = tmp94 != tmp94 tmp99 = tmp97 > tmp98 tmp100 = tmp95 | tmp99 tmp101 = tmp97 & tmp98 tmp102 = tmp96 | tmp101 tmp103 = tl.full([1], 2, tl.int64) tmp104 = tmp75 < tmp103 tmp105 = tmp102 & tmp104 tmp106 = tmp100 | tmp105 tmp107 = tl.where(tmp106, tmp74, tmp94) tmp108 = tl.where(tmp106, tmp75, tmp103) tmp110 = tmp22 - tmp109 tmp113 = tmp112 * tmp112 tmp116 = tmp115 * tmp115 tmp117 = tmp113 + tmp116 tmp120 = tmp119 * tmp119 tmp121 = tmp117 + tmp120 tmp124 = tmp123 * tmp123 tmp125 = tmp121 + tmp124 tmp126 = tmp110 + tmp125 tmp127 = -tmp126 tmp128 = tmp107 > tmp127 tmp129 = tmp107 == tmp127 tmp130 = tmp107 != tmp107 tmp131 = tmp127 != tmp127 tmp132 = tmp130 > tmp131 tmp133 = tmp128 | tmp132 tmp134 = tmp130 & tmp131 tmp135 = tmp129 | tmp134 tmp136 = tl.full([1], 3, tl.int64) tmp137 = 
tmp108 < tmp136 tmp138 = tmp135 & tmp137 tmp139 = tmp133 | tmp138 tmp140 = tl.where(tmp139, tmp107, tmp127) tmp141 = tl.where(tmp139, tmp108, tmp136) tl.store(out_ptr1 + (x0), tmp141, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/5o/c5oi7gfz6b6uijhoomk6x4x4cvkglml3pzkjy5rnmmnkfpf4klpy.py # Topologically Sorted Source Nodes: [z_q, sub_1, pow_3, mean, mul_1, diff, diff_1], Original ATen: [aten.embedding, aten.sub, aten.pow, aten.mean, aten.mul, aten.add] # Source node to ATen node mapping: # diff => add_1 # diff_1 => mul_2 # mean => mean # mul_1 => mul_1 # pow_3 => pow_3 # sub_1 => sub_1 # z_q => embedding # Graph fragment: # %embedding : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_4, %view_1), kwargs = {}) # %sub_1 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%embedding, %permute), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.default](args = (%pow_3,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.25), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mean), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 10.0), kwargs = {}) triton_per_fused_add_embedding_mean_mul_pow_sub_2 = async_compile.triton('triton_per_fused_add_embedding_mean_mul_pow_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_embedding_mean_mul_pow_sub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_embedding_mean_mul_pow_sub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex % 16 r2 = (rindex // 64) r1 = 
(rindex // 16) % 4 r3 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (16*r2)), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + (r3), None) tmp8 = tl.load(in_ptr3 + (r1), None, eviction_policy='evict_last') tmp1 = tl.full([RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (r1 + (4*tmp4)), None, eviction_policy='evict_last') tmp9 = tmp7 + tmp8 tmp10 = tmp6 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp14 / tmp15 tmp17 = 0.25 tmp18 = tmp16 * tmp17 tmp19 = tmp18 + tmp16 tmp20 = 10.0 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/dk/cdkr3ri2dfekp3npugb4xrgf7cbi4sc2a5hvtsmnjhvkhs6ljhce.py # Topologically Sorted Source Nodes: [z_q, sub_1, z_q_1, z_q_2], Original ATen: [aten.embedding, aten.sub, aten.add, aten.permute] # Source node to ATen node mapping: # sub_1 => sub_1 # z_q => embedding # z_q_1 => add_2 # z_q_2 => permute_3 # Graph fragment: # %embedding : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_4, %view_1), kwargs = {}) # %sub_1 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%embedding, %permute), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute, %sub_1), kwargs = {}) # %permute_3 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%add_2, [0, 3, 1, 2]), kwargs = {}) triton_poi_fused_add_embedding_permute_sub_3 = async_compile.triton('triton_poi_fused_add_embedding_permute_sub_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_embedding_permute_sub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_embedding_permute_sub_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = 
tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask) tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x2 + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tl.full([XBLOCK, YBLOCK], 4, tl.int32) tmp5 = tmp3 + tmp4 tmp6 = tmp3 < 0 tmp7 = tl.where(tmp6, tmp5, tmp3) tl.device_assert(((0 <= tmp7) & (tmp7 < 4)) | ~(xmask & ymask), "index out of bounds: 0 <= tmp7 < 4") tmp9 = tl.load(in_ptr3 + (y0 + (4*tmp7)), xmask & ymask) tmp10 = tmp9 - tmp2 tmp11 = tmp2 + tmp10 tl.store(out_ptr1 + (x2 + (16*y3)), tmp11, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3v/c3vm2zhhi4rpntwqf5s6qyo7xy2np5dpqrt33ekkvvgqavmjkw6p.py # Topologically Sorted Source Nodes: [z_q, sub_1], Original ATen: [aten.embedding, aten.sub, aten.pow, aten.mul] # Source node to ATen node mapping: # sub_1 => sub_1 # z_q => embedding # Graph fragment: # %embedding : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_4, %view_1), kwargs = {}) # %sub_1 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%embedding, %permute), kwargs = {}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 1.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%pow_5, 2.0), kwargs = {}) triton_poi_fused_embedding_mul_pow_sub_4 = async_compile.triton('triton_poi_fused_embedding_mul_pow_sub_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_embedding_mul_pow_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_embedding_mul_pow_sub_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex 
< ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel y3 = yindex x2 = xindex y0 = yindex % 16 y1 = (yindex // 16) tmp0 = tl.load(in_ptr0 + (y3), ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK, YBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(ymask), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (x2 + (4*tmp4)), xmask & ymask) tmp9 = tmp7 + tmp8 tmp10 = tmp6 - tmp9 tmp11 = 2.0 tmp12 = tmp10 * tmp11 tl.store(out_ptr0 + (x2 + (4*y3)), tmp12, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [z_e], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((64, 4), (1, 64), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(buf0, primals_3, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, matmul], Original ATen: [aten.mul, aten.mm] extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf4 = empty_strided_cuda((64, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [pow_1, sum_1, sub, dist, neg, max_1], Original ATen: [aten.pow, aten.sum, aten.sub, aten.add, aten.neg, aten.max] triton_poi_fused_add_max_neg_pow_sub_sum_1.run(buf0, primals_3, buf2, primals_4, buf4, 64, grid=grid(64), stream=stream0) buf5 = empty_strided_cuda((), (), torch.float32) buf9 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [z_q, sub_1, pow_3, mean, mul_1, diff, diff_1], Original ATen: [aten.embedding, aten.sub, aten.pow, aten.mean, aten.mul, aten.add] triton_per_fused_add_embedding_mean_mul_pow_sub_2.run(buf9, buf4, primals_4, buf0, primals_3, 1, 256, grid=grid(1), stream=stream0) buf7 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [z_q, sub_1, z_q_1, z_q_2], Original ATen: [aten.embedding, aten.sub, aten.add, aten.permute] triton_poi_fused_add_embedding_permute_sub_3.run(buf0, primals_3, buf4, primals_4, buf7, 16, 16, grid=grid(16, 16), stream=stream0) buf8 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [z_q, sub_1], Original ATen: [aten.embedding, aten.sub, aten.pow, aten.mul] triton_poi_fused_embedding_mul_pow_sub_4.run(buf4, primals_4, buf0, primals_3, buf8, 64, 4, grid=grid(64, 4), stream=stream0) del buf0 del primals_3 del primals_4 return (buf7, buf9, reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0), primals_1, primals_2, reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0), 
buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
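# What the fused kernel triton_poi_fused_add_max_neg_pow_sub_sum_1 above
# computes, written eagerly. A sketch for orientation only (the helper name
# `nearest_code` is ours, not from the source repo): nearest-codebook lookup
# via the expansion ||z - e||^2 = ||z||^2 - 2*z.e + ||e||^2, with the distance
# negated so a row-wise max acts as an argmin.
def nearest_code(flatten, codebook):
    # flatten: (N, D) latents, codebook: (K, D) embedding weights
    dist = (flatten.pow(2).sum(1, keepdim=True)
            - 2 * flatten @ codebook.t()
            + codebook.pow(2).sum(1, keepdim=True).t())
    return (-dist).max(1).indices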
import torch
from torch import nn
import torch.nn.functional as F
from scipy.cluster.vq import kmeans2


class VQVAEQuantize(nn.Module):
    """
    Neural Discrete Representation Learning, van den Oord et al. 2017
    https://arxiv.org/abs/1711.00937

    Follows the original DeepMind implementation
    https://github.com/deepmind/sonnet/blob/v2/sonnet/src/nets/vqvae.py
    https://github.com/deepmind/sonnet/blob/v2/examples/vqvae_example.ipynb
    """

    def __init__(self, num_hiddens, n_embed, embedding_dim):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.n_embed = n_embed
        self.kld_scale = 10.0
        self.proj = nn.Conv2d(num_hiddens, embedding_dim, 1)
        self.embed = nn.Embedding(n_embed, embedding_dim)
        self.register_buffer('data_initialized', torch.zeros(1))

    def forward(self, z):
        B, _C, H, W = z.size()
        z_e = self.proj(z)
        z_e = z_e.permute(0, 2, 3, 1)  # (B, H, W, C)
        flatten = z_e.reshape(-1, self.embedding_dim)
        if self.training and self.data_initialized.item() == 0:
            # one-time data-dependent codebook init via k-means
            rp = torch.randperm(flatten.size(0))
            kd = kmeans2(flatten[rp[:20000]].data.cpu().numpy(),
                         self.n_embed, minit='points')
            self.embed.weight.data.copy_(torch.from_numpy(kd[0]))
            self.data_initialized.fill_(1)
        dist = flatten.pow(2).sum(1, keepdim=True) - 2 * flatten @ self.embed.weight.t() + self.embed.weight.pow(2).sum(1, keepdim=True).t()
        _, ind = (-dist).max(1)
        ind = ind.view(B, H, W)
        z_q = self.embed_code(ind)
        commitment_cost = 0.25
        diff = commitment_cost * (z_q.detach() - z_e).pow(2).mean() + (z_q - z_e.detach()).pow(2).mean()
        diff *= self.kld_scale
        z_q = z_e + (z_q - z_e).detach()  # straight-through estimator
        z_q = z_q.permute(0, 3, 1, 2)  # back to (B, C, H, W)
        return z_q, diff, ind

    def embed_code(self, embed_id):
        return F.embedding(embed_id, self.embed.weight)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_hiddens': 4, 'n_embed': 4, 'embedding_dim': 4}]
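# Minimal usage sketch for the reference module above (ours, not part of the
# dataset record). Shapes follow get_inputs()/get_init_inputs(); eval() skips
# the data-dependent k-means init branch, so the sketch runs on CPU.
if __name__ == '__main__':
    quantizer = VQVAEQuantize(num_hiddens=4, n_embed=4, embedding_dim=4)
    quantizer.eval()
    z = torch.rand(4, 4, 4, 4)            # (B, C, H, W)
    z_q, diff, ind = quantizer(z)
    assert z_q.shape == (4, 4, 4, 4)      # quantized latents, input layout
    assert ind.shape == (4, 4, 4)         # one codebook index per position
    print('commitment/codebook loss:', diff.item())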
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (16 * x1 + 64 * (x0 // 16) + x0 % 16), xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_max_neg_pow_sub_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (64 * (x0 // 16) + x0 % 16), xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp5 = tl.load(in_ptr0 + (16 + 64 * (x0 // 16) + x0 % 16), xmask) tmp6 = tl.load(in_ptr1 + 1) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp11 = tl.load(in_ptr0 + (32 + 64 * (x0 // 16) + x0 % 16), xmask) tmp12 = tl.load(in_ptr1 + 2) tmp13 = tl.broadcast_to(tmp12, [XBLOCK]) tmp17 = tl.load(in_ptr0 + (48 + 64 * (x0 // 16) + x0 % 16), xmask) tmp18 = tl.load(in_ptr1 + 3) tmp19 = tl.broadcast_to(tmp18, [XBLOCK]) tmp23 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + 0) tmp26 = tl.broadcast_to(tmp25, [XBLOCK]) tmp28 = tl.load(in_ptr3 + 1) tmp29 = tl.broadcast_to(tmp28, [XBLOCK]) tmp32 = tl.load(in_ptr3 + 2) tmp33 = tl.broadcast_to(tmp32, [XBLOCK]) tmp36 = tl.load(in_ptr3 + 3) tmp37 = tl.broadcast_to(tmp36, [XBLOCK]) tmp42 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp44 = tl.load(in_ptr3 + 4) tmp45 = tl.broadcast_to(tmp44, [XBLOCK]) tmp47 = tl.load(in_ptr3 + 5) tmp48 = tl.broadcast_to(tmp47, [XBLOCK]) tmp51 = tl.load(in_ptr3 + 6) tmp52 = tl.broadcast_to(tmp51, [XBLOCK]) tmp55 = tl.load(in_ptr3 + 7) tmp56 = tl.broadcast_to(tmp55, [XBLOCK]) tmp76 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp78 = tl.load(in_ptr3 + 8) tmp79 = tl.broadcast_to(tmp78, [XBLOCK]) tmp81 = tl.load(in_ptr3 + 9) tmp82 = tl.broadcast_to(tmp81, [XBLOCK]) tmp85 = tl.load(in_ptr3 + 10) tmp86 = tl.broadcast_to(tmp85, [XBLOCK]) tmp89 = tl.load(in_ptr3 + 11) tmp90 = tl.broadcast_to(tmp89, [XBLOCK]) tmp109 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp111 = tl.load(in_ptr3 + 12) tmp112 = tl.broadcast_to(tmp111, [XBLOCK]) tmp114 = tl.load(in_ptr3 + 13) tmp115 = tl.broadcast_to(tmp114, [XBLOCK]) tmp118 = tl.load(in_ptr3 + 14) tmp119 = tl.broadcast_to(tmp118, [XBLOCK]) tmp122 = tl.load(in_ptr3 + 15) tmp123 = tl.broadcast_to(tmp122, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tmp3 * tmp3 tmp8 = tmp5 + tmp7 tmp9 = tmp8 * tmp8 tmp10 = tmp4 + tmp9 tmp14 = tmp11 + tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp20 = tmp17 + tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp24 = tmp22 - tmp23 tmp27 = tmp26 * tmp26 tmp30 = tmp29 * 
tmp29 tmp31 = tmp27 + tmp30 tmp34 = tmp33 * tmp33 tmp35 = tmp31 + tmp34 tmp38 = tmp37 * tmp37 tmp39 = tmp35 + tmp38 tmp40 = tmp24 + tmp39 tmp41 = -tmp40 tmp43 = tmp22 - tmp42 tmp46 = tmp45 * tmp45 tmp49 = tmp48 * tmp48 tmp50 = tmp46 + tmp49 tmp53 = tmp52 * tmp52 tmp54 = tmp50 + tmp53 tmp57 = tmp56 * tmp56 tmp58 = tmp54 + tmp57 tmp59 = tmp43 + tmp58 tmp60 = -tmp59 tmp61 = tmp41 > tmp60 tmp62 = tmp41 == tmp60 tmp63 = tmp41 != tmp41 tmp64 = tmp60 != tmp60 tmp65 = tmp63 > tmp64 tmp66 = tmp61 | tmp65 tmp67 = tmp63 & tmp64 tmp68 = tmp62 | tmp67 tmp69 = tl.full([1], 0, tl.int64) tmp70 = tl.full([1], 1, tl.int64) tmp71 = tmp69 < tmp70 tmp72 = tmp68 & tmp71 tmp73 = tmp66 | tmp72 tmp74 = tl.where(tmp73, tmp41, tmp60) tmp75 = tl.where(tmp73, tmp69, tmp70) tmp77 = tmp22 - tmp76 tmp80 = tmp79 * tmp79 tmp83 = tmp82 * tmp82 tmp84 = tmp80 + tmp83 tmp87 = tmp86 * tmp86 tmp88 = tmp84 + tmp87 tmp91 = tmp90 * tmp90 tmp92 = tmp88 + tmp91 tmp93 = tmp77 + tmp92 tmp94 = -tmp93 tmp95 = tmp74 > tmp94 tmp96 = tmp74 == tmp94 tmp97 = tmp74 != tmp74 tmp98 = tmp94 != tmp94 tmp99 = tmp97 > tmp98 tmp100 = tmp95 | tmp99 tmp101 = tmp97 & tmp98 tmp102 = tmp96 | tmp101 tmp103 = tl.full([1], 2, tl.int64) tmp104 = tmp75 < tmp103 tmp105 = tmp102 & tmp104 tmp106 = tmp100 | tmp105 tmp107 = tl.where(tmp106, tmp74, tmp94) tmp108 = tl.where(tmp106, tmp75, tmp103) tmp110 = tmp22 - tmp109 tmp113 = tmp112 * tmp112 tmp116 = tmp115 * tmp115 tmp117 = tmp113 + tmp116 tmp120 = tmp119 * tmp119 tmp121 = tmp117 + tmp120 tmp124 = tmp123 * tmp123 tmp125 = tmp121 + tmp124 tmp126 = tmp110 + tmp125 tmp127 = -tmp126 tmp128 = tmp107 > tmp127 tmp129 = tmp107 == tmp127 tmp130 = tmp107 != tmp107 tmp131 = tmp127 != tmp127 tmp132 = tmp130 > tmp131 tmp133 = tmp128 | tmp132 tmp134 = tmp130 & tmp131 tmp135 = tmp129 | tmp134 tmp136 = tl.full([1], 3, tl.int64) tmp137 = tmp108 < tmp136 tmp138 = tmp135 & tmp137 tmp139 = tmp133 | tmp138 tl.where(tmp139, tmp107, tmp127) tmp141 = tl.where(tmp139, tmp108, tmp136) tl.store(out_ptr1 + x0, tmp141, xmask) @triton.jit def triton_per_fused_add_embedding_mean_mul_pow_sub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex % 16 r2 = rindex // 64 r1 = rindex // 16 % 4 r3 = rindex tmp0 = tl.load(in_ptr0 + (r0 + 16 * r2), None, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr2 + r3, None) tmp8 = tl.load(in_ptr3 + r1, None, eviction_policy='evict_last') tmp1 = tl.full([RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (r1 + 4 * tmp4), None, eviction_policy= 'evict_last') tmp9 = tmp7 + tmp8 tmp10 = tmp6 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp14 / tmp15 tmp17 = 0.25 tmp18 = tmp16 * tmp17 tmp19 = tmp18 + tmp16 tmp20 = 10.0 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None) @triton.jit def triton_poi_fused_add_embedding_permute_sub_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x2 + 16 * y1), xmask & ymask, eviction_policy ='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tl.full([XBLOCK, YBLOCK], 4, tl.int32) tmp5 = tmp3 + tmp4 tmp6 = tmp3 < 0 tmp7 = tl.where(tmp6, tmp5, tmp3) tl.device_assert((0 <= tmp7) & (tmp7 < 4) | ~(xmask & ymask), 'index out of bounds: 0 <= tmp7 < 4') tmp9 = tl.load(in_ptr3 + (y0 + 4 * tmp7), xmask & ymask) tmp10 = tmp9 - tmp2 tmp11 = tmp2 + tmp10 tl.store(out_ptr1 + (x2 + 16 * y3), tmp11, xmask & ymask) @triton.jit def triton_poi_fused_embedding_mul_pow_sub_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel y3 = yindex x2 = xindex y0 = yindex % 16 y1 = yindex // 16 tmp0 = tl.load(in_ptr0 + y3, ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK, YBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~ymask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (x2 + 4 * tmp4), xmask & ymask) tmp9 = tmp7 + tmp8 tmp10 = tmp6 - tmp9 tmp11 = 2.0 tmp12 = tmp10 * tmp11 tl.store(out_ptr0 + (x2 + 4 * y3), tmp12, xmask & ymask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((64, 4), (1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](buf0, primals_3, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf2) buf4 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_add_max_neg_pow_sub_sum_1[grid(64)](buf0, primals_3, buf2, primals_4, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((), (), torch.float32) buf9 = buf5 del buf5 triton_per_fused_add_embedding_mean_mul_pow_sub_2[grid(1)](buf9, buf4, primals_4, buf0, primals_3, 1, 256, num_warps=2, num_stages=1 ) buf7 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_add_embedding_permute_sub_3[grid(16, 16)](buf0, primals_3, buf4, primals_4, buf7, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 
16, 4, 1), 0) del buf1 triton_poi_fused_embedding_mul_pow_sub_4[grid(64, 4)](buf4, primals_4, buf0, primals_3, buf8, 64, 4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1) del buf0 del primals_3 del primals_4 return buf7, buf9, reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0 ), primals_1, primals_2, reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0), buf8 class VQVAEQuantizeNew(nn.Module): """ Neural Discrete Representation Learning, van den Oord et al. 2017 https://arxiv.org/abs/1711.00937 Follows the original DeepMind implementation https://github.com/deepmind/sonnet/blob/v2/sonnet/src/nets/vqvae.py https://github.com/deepmind/sonnet/blob/v2/examples/vqvae_example.ipynb """ def __init__(self, num_hiddens, n_embed, embedding_dim): super().__init__() self.embedding_dim = embedding_dim self.n_embed = n_embed self.kld_scale = 10.0 self.proj = nn.Conv2d(num_hiddens, embedding_dim, 1) self.embed = nn.Embedding(n_embed, embedding_dim) self.register_buffer('data_initialized', torch.zeros(1)) def embed_code(self, embed_id): return F.embedding(embed_id, self.embed.weight) def forward(self, input_0): primals_2 = self.proj.weight primals_3 = self.proj.bias primals_4 = self.embed.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0], output[1], output[2]
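# Hedged parity check between the eager module and the Inductor-compiled
# wrapper above (a sketch, assuming a CUDA device and that both classes are in
# scope; `check_parity` is our name). The two classes register the same
# submodules and buffer, so weights can be shared via load_state_dict.
def check_parity(atol=1e-5):
    torch.manual_seed(0)
    ref = VQVAEQuantize(4, 4, 4).cuda().eval()
    fast = VQVAEQuantizeNew(4, 4, 4).cuda().eval()
    fast.load_state_dict(ref.state_dict())
    z = torch.rand(4, 4, 4, 4, device='cuda')
    zq_ref, diff_ref, ind_ref = ref(z)
    zq_new, diff_new, ind_new = fast(z)
    assert torch.allclose(zq_ref, zq_new, atol=atol)
    assert torch.equal(ind_ref, ind_new)
    assert torch.allclose(diff_ref, diff_new, atol=atol)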
crizCraig/deep-vector-quantization
VQVAEQuantize
false
15103
[ "MIT" ]
326
c3c026a1ccea369bc892ad6dde5e6d6cd5a508a4
https://github.com/crizCraig/deep-vector-quantization/tree/c3c026a1ccea369bc892ad6dde5e6d6cd5a508a4
ScaledDotProductAttention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/nq/cnqgpozfupmeoly3clr6c5kuqetg544kqc2babbdai3pypst4tge.py # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_2 => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 4), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_2 => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = 
tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm] extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [attn_2, output], Original ATen: [aten._softmax, aten.bmm] extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 del buf2 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
import torch.nn as nn


class ScaledDotProductAttention(nn.Module):
    """ Scaled Dot-Product Attention """

    def __init__(self, temperature, attn_dropout=0.1):
        super(ScaledDotProductAttention, self).__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, q, k, v, mask=None):
        attn = torch.bmm(q, k.transpose(1, 2))
        attn = attn / self.temperature
        if mask is not None:
            attn = attn.masked_fill(mask, -np.inf)
        attn = self.softmax(attn)
        attn = self.dropout(attn)
        output = torch.bmm(attn, v)
        return output


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'temperature': 4}]
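# Minimal check of the reference module above (our sketch, not dataset
# content): the forward computes softmax(q @ k^T / temperature) @ v per batch,
# so a hand-rolled version should match once dropout is disabled via eval().
if __name__ == '__main__':
    attn_layer = ScaledDotProductAttention(temperature=4)
    attn_layer.eval()  # make dropout a no-op for a deterministic comparison
    q, k, v = (torch.rand(4, 4, 4) for _ in range(3))
    out = attn_layer(q, k, v)
    manual = torch.softmax(torch.bmm(q, k.transpose(1, 2)) / 4, dim=2).bmm(v)
    assert torch.allclose(out, manual, atol=1e-6)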
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 del buf2 return buf3, class ScaledDotProductAttentionNew(nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super(ScaledDotProductAttentionNew, self).__init__() self.temperature = temperature self.dropout = nn.Dropout(attn_dropout) self.softmax = nn.Softmax(dim=2) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
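# The two pointwise kernels above split a numerically stable softmax in two:
# triton_poi_fused__softmax_0 computes exp((x - rowmax) / temperature) and
# triton_poi_fused__softmax_1 normalizes by the row sum. Subtracting the row
# max only shifts the logits by a per-row constant, so the result equals
# softmax(x / temperature). Equivalent eager sketch (ours, for orientation):
def stable_scaled_softmax(x, temperature=4.0):
    shifted = (x - x.amax(dim=2, keepdim=True)) / temperature
    e = shifted.exp()                        # kernel triton_poi_fused__softmax_0
    return e / e.sum(dim=2, keepdim=True)    # kernel triton_poi_fused__softmax_1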
dani3l125/TDNet
ScaledDotProductAttention
false
15104
[ "MIT" ]
195
3f8b5378fcc7f97c26b3760ddaf3d4402cf477d1
https://github.com/dani3l125/TDNet/tree/3f8b5378fcc7f97c26b3760ddaf3d4402cf477d1
decoder3
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/sl/csl4a7zrb2bmk2gqqylwow2gogyvzwxmwgyvxw7a3clzd3kamlia.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # out => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 36864 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 6 x1 = (xindex // 6) % 6 x2 = (xindex // 36) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), None, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kk/ckkeehad7xvjmxduxmwzjdm4zu3f5inttmd6kj4pju55xhwrnoea.py # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] # Source node to ATen node mapping: # out_3 => add, add_1, convert_element_type, convert_element_type_1, iota_2, mul, mul_1 # Graph fragment: # %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_2, 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.5), kwargs = {}) # %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_1, torch.int64), kwargs = {}) triton_poi_fused__to_copy_add_arange_mul_1 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/ss/css6aor6t7co2yrsfntkuhw327ptuhxjf522t4mqchur6fvbwduu.py # Topologically Sorted Source Nodes: [out_1, out_2, out_3, out_4], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d] # Source node to ATen node mapping: # out_1 => convolution # out_2 => relu # out_3 => _unsafe_index_2 # out_4 => _unsafe_index_3, _unsafe_index_4 # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) # %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, %sub_5, None]), kwargs = {}) # %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_3, [None, None, None, %sub_5]), kwargs = {}) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 51200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 10) % 10 x0 = xindex % 10 x4 = (xindex // 100) x2 = (xindex // 100) % 128 x7 = xindex tmp0 = tl.load(in_ptr0 + (7 + ((-1)*(tl_math.abs((-7) + (tl_math.abs((-1) + x1)))))), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (7 + ((-1)*(tl_math.abs((-7) + (tl_math.abs((-1) + x0)))))), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x2), None, eviction_policy='evict_last') tmp1 = 
tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + (4*tmp4) + (16*x4)), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x7), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/rx/crxvgwobuflbwxr5tolxudkqfjwftsihtrcmsuagdqs5jnfa5m6p.py # Topologically Sorted Source Nodes: [out_5, out_6, out_7], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] # Source node to ATen node mapping: # out_5 => convolution_1 # out_6 => relu_1 # out_7 => _unsafe_index_5, _unsafe_index_6 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_4, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) # %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_1, [None, None, %sub_5, None]), kwargs = {}) # %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_5, [None, None, None, %sub_5]), kwargs = {}) triton_poi_fused_convolution_reflection_pad2d_relu_3 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 51200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 10 x1 = (xindex // 10) % 10 x4 = (xindex // 100) x2 = (xindex // 100) % 128 x5 = xindex tmp0 = tl.load(in_ptr0 + (63 + ((-1)*(tl_math.abs((-7) + (tl_math.abs((-1) + x0))))) + ((-8)*(tl_math.abs((-7) + (tl_math.abs((-1) + x1))))) + (64*x4)), None, eviction_policy='evict_last') tmp1 = 
tl.load(in_ptr1 + (x2), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x5), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7n/c7nyyvchtptp5rbqvks7cavg63gajcswx5tpkvypdmc7ocd2zaaz.py # Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] # Source node to ATen node mapping: # out_9 => add_4, add_5, convert_element_type_4, convert_element_type_5, iota_8, mul_4, mul_5 # Graph fragment: # %iota_8 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_8, 1), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 0), kwargs = {}) # %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_4, torch.float32), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, 0.5), kwargs = {}) # %convert_element_type_5 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_5, torch.int64), kwargs = {}) triton_poi_fused__to_copy_add_arange_mul_4 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/rs/crsls3pmcaufikvptnrc4iu2p4kw6eqse34svczgyllufx42kkmd.py # Topologically Sorted Source Nodes: [out_8, 
out_relu9, out_9, out_10], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d] # Source node to ATen node mapping: # out_10 => _unsafe_index_8, _unsafe_index_9 # out_8 => convolution_2 # out_9 => _unsafe_index_7 # out_relu9 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_6, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) # %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_2, [None, None, %unsqueeze_1, %convert_element_type_5]), kwargs = {}) # %_unsafe_index_8 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_7, [None, None, %sub_13, None]), kwargs = {}) # %_unsafe_index_9 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_8, [None, None, None, %sub_13]), kwargs = {}) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 82944 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 18) % 18 x0 = xindex % 18 x4 = (xindex // 324) x2 = (xindex // 324) % 64 x7 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-1) + x1)))))), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-1) + x0)))))), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 8, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 
0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + (8*tmp4) + (64*x4)), xmask, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x7), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ys/cys33twmersisbvmamo2s7xddzpc4sxcq6r5s6nmj42c553grxlw.py # Topologically Sorted Source Nodes: [out_11, out_12, out_13], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] # Source node to ATen node mapping: # out_11 => convolution_3 # out_12 => relu_3 # out_13 => _unsafe_index_10, _unsafe_index_11 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_9, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) # %_unsafe_index_10 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_3, [None, None, %sub_13, None]), kwargs = {}) # %_unsafe_index_11 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_10, [None, None, None, %sub_13]), kwargs = {}) triton_poi_fused_convolution_reflection_pad2d_relu_6 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 82944 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 18 x1 = (xindex // 18) % 18 x4 = (xindex // 324) x2 = (xindex // 324) % 64 x5 = xindex tmp0 = tl.load(in_ptr0 + (255 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-1) + x0))))) + ((-16)*(tl_math.abs((-15) + (tl_math.abs((-1) + x1))))) + (256*x4)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = 
triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x5), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/lt/cltg26mqvilmgyoclm5qrkhwhm6nqn6xkvimm7kt4prj6swrg65g.py # Topologically Sorted Source Nodes: [out_14], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out_14 => convolution_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_11, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_7 = async_compile.triton('triton_poi_fused_convolution_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 256) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/4a/c4axvjodzpgbfldvkr4o6ry7gif45hov6igdcqgz5ovvvkzhagcd.py # Topologically Sorted Source Nodes: [out_11, out_12], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_11 => convolution_3 # out_12 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_9, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) # %le_18 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_8 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, 
triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 64 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/yf/cyfzn7bsxfef4ewssoi6dlmwj6rlr343g2hxnugzycah5xukd255.py # Topologically Sorted Source Nodes: [out_8, out_relu9], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_8 => convolution_2 # out_relu9 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_6, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) # %le_37 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_9 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 64 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kx/ckxuqcsqkxzu4nou6q3qnm3p7j3cigcnx5t7srrutb2ivuojmdbf.py # Topologically Sorted Source Nodes: [out_5, out_6], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_5 => convolution_1 # out_6 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_4, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) # %le_56 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_10 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 128 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kx/ckx2yrqzgpdjiz447wrblf7xuaz6txjijuvwqp6nbceaqwtdq45g.py # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_1 => convolution # out_2 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le_75 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_11 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 128 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del 
async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_2, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_3, (128, ), (1, )) assert_size_stride(primals_4, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_5, (128, ), (1, )) assert_size_stride(primals_6, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64, ), (1, )) assert_size_stride(primals_10, (3, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (3, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 256, 6, 6), (9216, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 36864, grid=grid(36864), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 128, 4, 4), (2048, 16, 4, 1)) buf2 = empty_strided_cuda((8, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] triton_poi_fused__to_copy_add_arange_mul_1.run(buf2, 8, grid=grid(8), stream=stream0) buf3 = empty_strided_cuda((4, 128, 10, 10), (12800, 100, 10, 1), torch.float32) # Topologically Sorted Source Nodes: [out_1, out_2, out_3, out_4], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d] triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2.run(buf2, buf1, primals_3, buf3, 51200, grid=grid(51200), stream=stream0) # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 128, 8, 8), (8192, 64, 8, 1)) buf5 = empty_strided_cuda((4, 128, 10, 10), (12800, 100, 10, 1), torch.float32) # Topologically Sorted Source Nodes: [out_5, out_6, out_7], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] triton_poi_fused_convolution_reflection_pad2d_relu_3.run(buf4, primals_5, buf5, 51200, grid=grid(51200), stream=stream0) # Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 8, 8), (4096, 64, 8, 1)) buf7 = empty_strided_cuda((16, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] triton_poi_fused__to_copy_add_arange_mul_4.run(buf7, 16, grid=grid(16), stream=stream0) buf8 = empty_strided_cuda((4, 64, 18, 18), (20736, 324, 18, 1), torch.float32) # Topologically Sorted Source Nodes: [out_8, out_relu9, out_9, out_10], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d] 
triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5.run(buf7, buf6, primals_7, buf8, 82944, grid=grid(82944), stream=stream0) # Topologically Sorted Source Nodes: [out_11], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 64, 16, 16), (16384, 256, 16, 1)) buf10 = empty_strided_cuda((4, 64, 18, 18), (20736, 324, 18, 1), torch.float32) # Topologically Sorted Source Nodes: [out_11, out_12, out_13], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] triton_poi_fused_convolution_reflection_pad2d_relu_6.run(buf9, primals_9, buf10, 82944, grid=grid(82944), stream=stream0) # Topologically Sorted Source Nodes: [out_14], Original ATen: [aten.convolution] buf11 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 3, 16, 16), (768, 256, 16, 1)) buf12 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [out_14], Original ATen: [aten.convolution] triton_poi_fused_convolution_7.run(buf12, primals_11, 3072, grid=grid(3072), stream=stream0) del primals_11 buf13 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.bool) # Topologically Sorted Source Nodes: [out_11, out_12], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_8.run(buf9, primals_9, buf13, 65536, grid=grid(65536), stream=stream0) del buf9 del primals_9 buf14 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.bool) # Topologically Sorted Source Nodes: [out_8, out_relu9], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_9.run(buf6, primals_7, buf14, 16384, grid=grid(16384), stream=stream0) del buf6 del primals_7 buf15 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool) # Topologically Sorted Source Nodes: [out_5, out_6], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_10.run(buf4, primals_5, buf15, 32768, grid=grid(32768), stream=stream0) del buf4 del primals_5 buf16 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_11.run(buf1, primals_3, buf16, 8192, grid=grid(8192), stream=stream0) del buf1 del primals_3 return (buf12, primals_2, primals_4, primals_6, primals_8, primals_10, buf0, buf2, buf3, buf5, buf7, buf8, buf10, buf13, buf14, buf15, buf16, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 256, 4, 4), (4096, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = 
rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((3, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
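The buf2 and buf7 index buffers produced by the `_to_copy_add_arange_mul` kernels in the call above are simply the source-index maps for 2x nearest-neighbour upsampling: each output coordinate i maps to floor(i * 0.5). A minimal eager-mode sketch of that arithmetic, using the 8-element case that fills buf2 (a sanity reference, not part of the generated code):

import torch

# Mirror of the kernel body: arange -> float -> * 0.5 -> truncate to int.
idx = (torch.arange(8, dtype=torch.float32) * 0.5).to(torch.int64)
assert idx.tolist() == [0, 0, 1, 1, 2, 2, 3, 3]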
import torch
import torch.nn as nn


class decoder3(nn.Module):

    def __init__(self):
        super(decoder3, self).__init__()
        self.reflecPad7 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv7 = nn.Conv2d(256, 128, 3, 1, 0)
        self.relu7 = nn.ReLU(inplace=True)
        self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
        self.reflecPad8 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv8 = nn.Conv2d(128, 128, 3, 1, 0)
        self.relu8 = nn.ReLU(inplace=True)
        self.reflecPad9 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv9 = nn.Conv2d(128, 64, 3, 1, 0)
        self.relu9 = nn.ReLU(inplace=True)
        self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
        self.reflecPad10 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv10 = nn.Conv2d(64, 64, 3, 1, 0)
        self.relu10 = nn.ReLU(inplace=True)
        self.reflecPad11 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv11 = nn.Conv2d(64, 3, 3, 1, 0)

    def forward(self, x):
        out = self.reflecPad7(x)
        out = self.conv7(out)
        out = self.relu7(out)
        out = self.unpool(out)
        out = self.reflecPad8(out)
        out = self.conv8(out)
        out = self.relu8(out)
        out = self.reflecPad9(out)
        out = self.conv9(out)
        out_relu9 = self.relu9(out)
        out = self.unpool2(out_relu9)
        out = self.reflecPad10(out)
        out = self.conv10(out)
        out = self.relu10(out)
        out = self.reflecPad11(out)
        out = self.conv11(out)
        return out


def get_inputs():
    return [torch.rand([4, 256, 4, 4])]


def get_init_inputs():
    return [[], {}]
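A quick shape sanity check for the eager decoder3 above, using the input size from get_inputs(); this is a sketch (CPU suffices here, only the compiled variant further down needs CUDA):

import torch

model = decoder3().eval()
with torch.no_grad():
    y = model(torch.rand(4, 256, 4, 4))
# Reflection pad + 3x3 conv preserves spatial size, the two nearest-neighbour
# upsamplings take 4x4 -> 8x8 -> 16x16, and conv11 maps 64 channels down to 3.
assert y.shape == (4, 3, 16, 16)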
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), None, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, None) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 10 % 10 x0 = xindex % 10 x4 = xindex // 100 x2 = xindex // 100 % 128 x7 = xindex tmp0 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x1 ))), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0 ))), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 4 * tmp4 + 16 * x4), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x7, tmp13, None) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 10 x1 = xindex // 10 % 10 x4 = xindex // 100 x2 = xindex // 100 % 128 x5 = xindex tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0)) + -8 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 64 * x4), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x5, tmp4, None) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5(in_ptr0, 
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 82944 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 18 % 18 x0 = xindex % 18 x4 = xindex // 324 x2 = xindex // 324 % 64 x7 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x1))), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x0))), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 8, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 8 * tmp4 + 64 * x4), xmask, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x7, tmp13, xmask) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 82944 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 18 x1 = xindex // 18 % 18 x4 = xindex // 324 x2 = xindex // 324 % 64 x5 = xindex tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x5, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 3072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 256 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_2, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_3, (128,), (1,)) assert_size_stride(primals_4, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (3, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 256, 6, 6), (9216, 36, 6, 1), torch. float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(36864)](primals_1, buf0, 36864, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 128, 4, 4), (2048, 16, 4, 1)) buf2 = empty_strided_cuda((8,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_1[grid(8)](buf2, 8, XBLOCK =8, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 128, 10, 10), (12800, 100, 10, 1), torch.float32) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2[grid (51200)](buf2, buf1, primals_3, buf3, 51200, XBLOCK=512, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 128, 8, 8), (8192, 64, 8, 1)) buf5 = empty_strided_cuda((4, 128, 10, 10), (12800, 100, 10, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(51200)](buf4, primals_5, buf5, 51200, XBLOCK=512, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 8, 8), (4096, 64, 8, 1)) buf7 = empty_strided_cuda((16,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_4[grid(16)](buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 64, 18, 18), (20736, 324, 18, 1), torch.float32) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5[grid (82944)](buf7, buf6, primals_7, buf8, 82944, XBLOCK=1024, num_warps=4, num_stages=1) buf9 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 
0), groups=1, bias=None) assert_size_stride(buf9, (4, 64, 16, 16), (16384, 256, 16, 1)) buf10 = empty_strided_cuda((4, 64, 18, 18), (20736, 324, 18, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_6[grid(82944)](buf9, primals_9, buf10, 82944, XBLOCK=1024, num_warps=4, num_stages=1) buf11 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 3, 16, 16), (768, 256, 16, 1)) buf12 = buf11 del buf11 triton_poi_fused_convolution_7[grid(3072)](buf12, primals_11, 3072, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 buf13 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_8[grid(65536)]( buf9, primals_9, buf13, 65536, XBLOCK=256, num_warps=4, num_stages=1) del buf9 del primals_9 buf14 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_9[grid(16384)]( buf6, primals_7, buf14, 16384, XBLOCK=128, num_warps=4, num_stages=1) del buf6 del primals_7 buf15 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool ) triton_poi_fused_convolution_relu_threshold_backward_10[grid(32768)]( buf4, primals_5, buf15, 32768, XBLOCK=256, num_warps=4, num_stages=1) del buf4 del primals_5 buf16 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.bool ) triton_poi_fused_convolution_relu_threshold_backward_11[grid(8192)]( buf1, primals_3, buf16, 8192, XBLOCK=128, num_warps=4, num_stages=1 ) del buf1 del primals_3 return (buf12, primals_2, primals_4, primals_6, primals_8, primals_10, buf0, buf2, buf3, buf5, buf7, buf8, buf10, buf13, buf14, buf15, buf16) class decoder3New(nn.Module): def __init__(self): super(decoder3New, self).__init__() self.reflecPad7 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv7 = nn.Conv2d(256, 128, 3, 1, 0) self.relu7 = nn.ReLU(inplace=True) self.unpool = nn.UpsamplingNearest2d(scale_factor=2) self.reflecPad8 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv8 = nn.Conv2d(128, 128, 3, 1, 0) self.relu8 = nn.ReLU(inplace=True) self.reflecPad9 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv9 = nn.Conv2d(128, 64, 3, 1, 0) self.relu9 = nn.ReLU(inplace=True) self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2) self.reflecPad10 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv10 = nn.Conv2d(64, 64, 3, 1, 0) self.relu10 = nn.ReLU(inplace=True) self.reflecPad11 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv11 = nn.Conv2d(64, 3, 3, 1, 0) def forward(self, input_0): primals_2 = self.conv7.weight primals_3 = self.conv7.bias primals_4 = self.conv8.weight primals_5 = self.conv8.bias primals_6 = self.conv9.weight primals_7 = self.conv9.bias primals_8 = self.conv10.weight primals_9 = self.conv10.bias primals_10 = self.conv11.weight primals_11 = self.conv11.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
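A minimal parity check between the eager decoder3 and the compiled decoder3New is sketched below. It assumes a CUDA device (the generated kernels are compiled for cuda), relies on the two modules having identical parameter names so load_state_dict can share the conv weights, and allows a small tolerance for the fused kernels:

import torch

if torch.cuda.is_available():
    eager = decoder3().cuda().eval()
    compiled = decoder3New().cuda().eval()
    compiled.load_state_dict(eager.state_dict())  # same submodule names
    x = torch.rand(4, 256, 4, 4, device='cuda')
    with torch.no_grad():
        ref = eager(x)
        out = compiled(x)
    assert torch.allclose(ref, out, atol=1e-5)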
czczup/URST
decoder3
false
15105
[ "Apache-2.0" ]
119
000ec9f7728f12ffad989ec1d07b1dd579514133
https://github.com/czczup/URST/tree/000ec9f7728f12ffad989ec1d07b1dd579514133
SelfCorrelationComputation
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/bt/cbt6qjk7fstz5ggpexfnv3ppky3sc4orxitbywytxv37zen4wppf.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.relu, aten.div] # Source node to ATen node mapping: # x => relu # x_1 => div # Graph fragment: # %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %expand), kwargs = {}) triton_poi_fused_div_relu_0 = async_compile.triton('triton_poi_fused_div_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp3 = tl.load(in_ptr0 + 
(x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp5 = tmp4 * tmp4 tmp7 = triton_helpers.maximum(tmp1, tmp6) tmp8 = tmp7 * tmp7 tmp9 = tmp5 + tmp8 tmp11 = triton_helpers.maximum(tmp1, tmp10) tmp12 = tmp11 * tmp11 tmp13 = tmp9 + tmp12 tmp15 = triton_helpers.maximum(tmp1, tmp14) tmp16 = tmp15 * tmp15 tmp17 = tmp13 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = 1e-12 tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = tmp2 / tmp20 tl.store(out_ptr0 + (x3), tmp21, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/5k/c5kwqb4j33m5tead5aztj2v36763oo6xwjtzsilzkbg5wwq5unbw.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_5 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 32], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 256 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x4 = (xindex // 5) y1 = (yindex // 4) % 4 x3 = xindex % 5 y0 = yindex % 4 y6 = yindex x7 = xindex tmp12 = tl.load(in_ptr0 + (y6), ymask, eviction_policy='evict_last') tmp0 = (-2) + x4 + y1 tmp1 = tl.full([1, 1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-2) + x3 + y0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 
tmp11 = tl.load(in_ptr0 + ((-10) + x3 + y6 + (4*x4) + (16*((x3 + (5*x4)) // 25))), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp13 = tmp11 * tmp12 tl.store(out_ptr0 + (x7 + (25*y6)), tmp13, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bq/cbqmkbi3r74fv4szwqawdmmci3c7hiwdgpddujjpx5uc7uoqibme.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {}) # %copy_ : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%arg0_1, %relu), kwargs = {}) triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_ptr0', 'out_ptr1'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_2(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr1 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.relu, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_relu_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4, 4, 5, 5), (1600, 400, 100, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf0, buf1, 256, 25, grid=grid(256, 25), stream=stream0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu] triton_poi_fused_relu_2.run(arg0_1, arg0_1, 256, grid=grid(256), stream=stream0) del arg0_1 del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from 
torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
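The fused kernel triton_poi_fused_div_relu_0 above folds ReLU and per-channel L2 normalization into a single pass: each element is rectified, then divided by max(||relu(x)||_2, 1e-12) taken over the 4 channels. A small eager-mode sketch of the same math (a sanity reference against F.normalize, not the generated code itself):

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)
r = torch.relu(x)
norm = r.pow(2).sum(dim=1, keepdim=True).sqrt().clamp_min(1e-12)
# F.normalize with p=2, dim=1 computes exactly this quotient.
assert torch.allclose(r / norm, F.normalize(r, p=2, dim=1, eps=1e-12))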
import torch
import torch.nn as nn
import torch.nn.functional as F


class SelfCorrelationComputation(nn.Module):

    def __init__(self, kernel_size=(5, 5), padding=2):
        super(SelfCorrelationComputation, self).__init__()
        self.kernel_size = kernel_size
        self.unfold = nn.Unfold(kernel_size=kernel_size, padding=padding)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        b, c, h, w = x.shape
        x = self.relu(x)
        x = F.normalize(x, dim=1, p=2)
        identity = x
        x = self.unfold(x)
        x = x.view(b, c, self.kernel_size[0], self.kernel_size[1], h, w)
        x = x * identity.unsqueeze(2).unsqueeze(2)
        x = x.permute(0, 1, 4, 5, 2, 3).contiguous()
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
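A shape sketch for SelfCorrelationComputation: every spatial location is paired with its 5x5 neighbourhood, so two trailing kernel dimensions are appended (sizes taken from get_inputs and get_init_inputs above):

import torch

scc = SelfCorrelationComputation(kernel_size=(5, 5), padding=2)
out = scc(torch.rand(4, 4, 4, 4))
assert out.shape == (4, 4, 4, 4, 5, 5)  # (b, c, h, w, kh, kw)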
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp5 = tmp4 * tmp4 tmp7 = triton_helpers.maximum(tmp1, tmp6) tmp8 = tmp7 * tmp7 tmp9 = tmp5 + tmp8 tmp11 = triton_helpers.maximum(tmp1, tmp10) tmp12 = tmp11 * tmp11 tmp13 = tmp9 + tmp12 tmp15 = triton_helpers.maximum(tmp1, tmp14) tmp16 = tmp15 * tmp15 tmp17 = tmp13 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = 1e-12 tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = tmp2 / tmp20 tl.store(out_ptr0 + x3, tmp21, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 256 xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x4 = xindex // 5 y1 = yindex // 4 % 4 x3 = xindex % 5 y0 = yindex % 4 y6 = yindex x7 = xindex tmp12 = tl.load(in_ptr0 + y6, ymask, eviction_policy='evict_last') tmp0 = -2 + x4 + y1 tmp1 = tl.full([1, 1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -2 + x3 + y0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-10 + x3 + y6 + 4 * x4 + 16 * ((x3 + 5 * x4) // 25)), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp13 = tmp11 * tmp12 tl.store(out_ptr0 + (x7 + 25 * y6), tmp13, xmask & ymask) @triton.jit def triton_poi_fused_relu_2(in_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr1 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_relu_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4, 5, 5), (1600, 400, 100, 25, 5, 1), torch.float32) triton_poi_fused_clone_1[grid(256, 25)](buf0, buf1, 256, 25, XBLOCK =32, YBLOCK=32, num_warps=4, num_stages=1) 
triton_poi_fused_relu_2[grid(256)](arg0_1, arg0_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del buf0 return buf1, class SelfCorrelationComputationNew(nn.Module): def __init__(self, kernel_size=(5, 5), padding=2): super(SelfCorrelationComputationNew, self).__init__() self.kernel_size = kernel_size self.unfold = nn.Unfold(kernel_size=kernel_size, padding=padding) self.relu = nn.ReLU(inplace=True) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
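One behavioural detail of the compiled path worth noting: triton_poi_fused_relu_2 writes the rectified values back into arg0_1, preserving the nn.ReLU(inplace=True) semantics of the eager module. A hedged usage sketch showing that side effect (it assumes a CUDA device, which the generated kernels require):

import torch

if torch.cuda.is_available():
    m = SelfCorrelationComputationNew().cuda()
    x = torch.randn(4, 4, 4, 4, device='cuda')
    out = m(x)
    assert out.shape == (4, 4, 4, 4, 5, 5)
    assert (x >= 0).all()  # the input was rectified in place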
dahyun-kang/renet
SelfCorrelationComputation
false
15106
[ "MIT" ]
50
43a4e5af96b56c99a0cd63e35bd272db72f7f3a4
https://github.com/dahyun-kang/renet/tree/43a4e5af96b56c99a0cd63e35bd272db72f7f3a4
discriminator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/7g/c7ggngh5cc7qz3kazi2i3nbboiwlflyhfv7atomowhmdqeaxrw7n.py # Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # out => convolution # out_1 => gt, mul, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1, 1], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.02), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': 
False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3631696 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 226981) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.02 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xq/cxq36qvuywew2c6dzj3rk2mxlscmsbfambixf7tcoubjt6r5i4wh.py # Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # out_2 => convolution_1 # out_3 => gt_1, mul_1, where_1 # Graph fragment: # %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [2, 2, 2], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.02), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {}) triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 864000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 27000) % 8 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.02 tmp6 = tmp2 * tmp5 tmp7 = 
tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/52/c52lwpgoyl2umnax3xrfhoc2xsvhwsahg2mebcri5jzths25w6h5.py # Topologically Sorted Source Nodes: [out_4, out_5], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # out_4 => convolution_2 # out_5 => gt_2, mul_2, where_2 # Graph fragment: # %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_6, %primals_7, [1, 1, 1], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.02), kwargs = {}) # %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {}) triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1404928 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 21952) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.02 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/hv/chvevdlgz4bxjq2s53td4dqpsvl7zedhsqagv6bwrykfqc4a4ffo.py # Topologically Sorted Source Nodes: [out_6, out_7], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # out_6 => convolution_3 # out_7 => gt_3, mul_3, where_3 # Graph fragment: # %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_8, %primals_9, [1, 1, 1], [0, 0, 
0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.02), kwargs = {}) # %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {}) triton_poi_fused_convolution_leaky_relu_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2249728 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 17576) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.02 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xg/cxgfuogj7qoftos273agdszlcn6bovgnasksxc5z5hslwppjbd74.py # Topologically Sorted Source Nodes: [out_8, out_9], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # out_8 => convolution_4 # out_9 => gt_4, mul_4, where_4 # Graph fragment: # %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_3, %primals_10, %primals_11, [1, 1, 1], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.02), kwargs = {}) # %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_4, %mul_4), kwargs = {}) triton_poi_fused_convolution_leaky_relu_4 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_4', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3538944 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 13824) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.02 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/2f/c2fdm7vamdst63fiwo7sy4t5jl33zdqag4wco6iw3oahdht6vic7.py # Topologically Sorted Source Nodes: [out_10, out_11], Original ATen: [aten.convolution, aten.sigmoid] # Source node to ATen node mapping: # out_10 => convolution_5 # out_11 => sigmoid # Graph fragment: # %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_4, %primals_12, %primals_13, [1, 1, 1], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_5,), kwargs = {}) triton_poi_fused_convolution_sigmoid_5 = async_compile.triton('triton_poi_fused_convolution_sigmoid_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_5', 'mutated_arg_names': 
['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_sigmoid_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 221184 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 13824) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x3), tmp3, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args args.clear() assert_size_stride(primals_1, (4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1)) assert_size_stride(primals_2, (4, 1, 4, 4, 4), (64, 64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (8, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_5, (8, ), (1, )) assert_size_stride(primals_6, (16, 8, 3, 3, 3), (216, 27, 9, 3, 1)) assert_size_stride(primals_7, (16, ), (1, )) assert_size_stride(primals_8, (32, 16, 3, 3, 3), (432, 27, 9, 3, 1)) assert_size_stride(primals_9, (32, ), (1, )) assert_size_stride(primals_10, (64, 32, 3, 3, 3), (864, 27, 9, 3, 1)) assert_size_stride(primals_11, (64, ), (1, )) assert_size_stride(primals_12, (4, 64, 1, 1, 1), (64, 1, 1, 1, 1)) assert_size_stride(primals_13, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 61, 61, 61), (907924, 226981, 3721, 61, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0.run(buf1, primals_3, 3631696, grid=grid(3631696), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2, 2), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 8, 30, 30, 30), (216000, 27000, 900, 30, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_1.run(buf3, primals_5, 864000, grid=grid(864000), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) 
assert_size_stride(buf4, (4, 16, 28, 28, 28), (351232, 21952, 784, 28, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [out_4, out_5], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_2.run(buf5, primals_7, 1404928, grid=grid(1404928), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 32, 26, 26, 26), (562432, 17576, 676, 26, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [out_6, out_7], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_3.run(buf7, primals_9, 2249728, grid=grid(2249728), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 24, 24, 24), (884736, 13824, 576, 24, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [out_8, out_9], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_4.run(buf9, primals_11, 3538944, grid=grid(3538944), stream=stream0) del primals_11 # Topologically Sorted Source Nodes: [out_10], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 24, 24, 24), (55296, 13824, 576, 24, 1)) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [out_10, out_11], Original ATen: [aten.convolution, aten.sigmoid] triton_poi_fused_convolution_sigmoid_5.run(buf11, primals_13, 221184, grid=grid(221184), stream=stream0) del primals_13 return (buf11, primals_1, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, buf1, buf3, buf5, buf7, buf9, buf11, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 4, 4, 4), (64, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((8, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((16, 8, 3, 3, 3), (216, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((32, 16, 3, 3, 3), (432, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((64, 32, 3, 3, 3), (864, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 64, 1, 1, 1), (64, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', 
dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
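As a hedged reference for what each triton_poi_fused_convolution_leaky_relu_* kernel above computes per element, here is a minimal eager-mode sketch (the function name is mine, not part of the generated module): Inductor fuses the convolution's bias add with LeakyReLU at negative_slope=0.02 and writes back in place over the convolution output. The index arithmetic x1 = xindex // 226981 % 4 in kernel 0 recovers the channel index, since each channel of the first feature map holds 61**3 == 226981 elements.

import torch

def fused_bias_leaky_relu_reference(conv_out: torch.Tensor,
                                    bias: torch.Tensor) -> torch.Tensor:
    # Broadcast the per-channel bias over (N, C, D, H, W), then apply
    # LeakyReLU with negative_slope=0.02: the same tmp2..tmp7 chain
    # that appears in the Triton kernel bodies above.
    x = conv_out + bias.view(1, -1, 1, 1, 1)
    return torch.where(x > 0.0, x, x * 0.02)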
import torch import torch.nn as nn import torch.nn.functional as F class discriminator(nn.Module): def __init__(self, d_dim, z_dim): super(discriminator, self).__init__() self.d_dim = d_dim self.z_dim = z_dim self.conv_1 = nn.Conv3d(1, self.d_dim, 4, stride=1, padding=0, bias =True) self.conv_2 = nn.Conv3d(self.d_dim, self.d_dim * 2, 3, stride=2, padding=0, bias=True) self.conv_3 = nn.Conv3d(self.d_dim * 2, self.d_dim * 4, 3, stride=1, padding=0, bias=True) self.conv_4 = nn.Conv3d(self.d_dim * 4, self.d_dim * 8, 3, stride=1, padding=0, bias=True) self.conv_5 = nn.Conv3d(self.d_dim * 8, self.d_dim * 16, 3, stride= 1, padding=0, bias=True) self.conv_6 = nn.Conv3d(self.d_dim * 16, self.z_dim, 1, stride=1, padding=0, bias=True) def forward(self, voxels, is_training=False): out = voxels out = self.conv_1(out) out = F.leaky_relu(out, negative_slope=0.02, inplace=True) out = self.conv_2(out) out = F.leaky_relu(out, negative_slope=0.02, inplace=True) out = self.conv_3(out) out = F.leaky_relu(out, negative_slope=0.02, inplace=True) out = self.conv_4(out) out = F.leaky_relu(out, negative_slope=0.02, inplace=True) out = self.conv_5(out) out = F.leaky_relu(out, negative_slope=0.02, inplace=True) out = self.conv_6(out) out = torch.sigmoid(out) return out def get_inputs(): return [torch.rand([4, 1, 64, 64, 64])] def get_init_inputs(): return [[], {'d_dim': 4, 'z_dim': 4}]
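A short usage sketch (my addition; the class and shapes come from the record above) showing how the spatial extent shrinks through the stack. The sizes agree with the assert_size_stride checks in the generated call:

import torch

d = discriminator(d_dim=4, z_dim=4)    # matches get_init_inputs()
voxels = torch.rand(4, 1, 64, 64, 64)  # matches get_inputs()
out = d(voxels)
# Spatial trace: 64 -(k4,s1)-> 61 -(k3,s2)-> 30 -(k3,s1)-> 28
#                -(k3,s1)-> 26 -(k3,s1)-> 24 -(k1,s1)-> 24
print(out.shape)  # torch.Size([4, 4, 24, 24, 24]); sigmoid keeps values in (0, 1)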
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3631696 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 226981 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.02 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 864000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 27000 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.02 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 21952 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.02 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2249728 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 17576 % 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.02 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 13824 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.02 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_sigmoid_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 13824 % 4 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x3, tmp3, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) 
= args args.clear() assert_size_stride(primals_1, (4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1)) assert_size_stride(primals_2, (4, 1, 4, 4, 4), (64, 64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (8, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (16, 8, 3, 3, 3), (216, 27, 9, 3, 1)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (32, 16, 3, 3, 3), (432, 27, 9, 3, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (64, 32, 3, 3, 3), (864, 27, 9, 3, 1)) assert_size_stride(primals_11, (64,), (1,)) assert_size_stride(primals_12, (4, 64, 1, 1, 1), (64, 1, 1, 1, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 61, 61, 61), (907924, 226981, 3721, 61, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(3631696)](buf1, primals_3, 3631696, XBLOCK=512, num_warps=8, num_stages=1) del primals_3 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2, 2), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 8, 30, 30, 30), (216000, 27000, 900, 30, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_leaky_relu_1[grid(864000)](buf3, primals_5, 864000, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 28, 28, 28), (351232, 21952, 784, 28, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_leaky_relu_2[grid(1404928)](buf5, primals_7, 1404928, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 32, 26, 26, 26), (562432, 17576, 676, 26, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_leaky_relu_3[grid(2249728)](buf7, primals_9, 2249728, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1, 1 ), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 24, 24, 24), (884736, 13824, 576, 24, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_leaky_relu_4[grid(3538944)](buf9, primals_11, 3538944, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 24, 24, 24), (55296, 13824, 576, 24, 1)) buf11 = buf10 del buf10 triton_poi_fused_convolution_sigmoid_5[grid(221184)](buf11, primals_13, 221184, XBLOCK=512, num_warps=8, num_stages=1) del primals_13 return (buf11, primals_1, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, buf1, buf3, buf5, buf7, buf9, buf11) class discriminatorNew(nn.Module): def __init__(self, d_dim, z_dim): super(discriminatorNew, self).__init__() self.d_dim = d_dim 
self.z_dim = z_dim self.conv_1 = nn.Conv3d(1, self.d_dim, 4, stride=1, padding=0, bias =True) self.conv_2 = nn.Conv3d(self.d_dim, self.d_dim * 2, 3, stride=2, padding=0, bias=True) self.conv_3 = nn.Conv3d(self.d_dim * 2, self.d_dim * 4, 3, stride=1, padding=0, bias=True) self.conv_4 = nn.Conv3d(self.d_dim * 4, self.d_dim * 8, 3, stride=1, padding=0, bias=True) self.conv_5 = nn.Conv3d(self.d_dim * 8, self.d_dim * 16, 3, stride= 1, padding=0, bias=True) self.conv_6 = nn.Conv3d(self.d_dim * 16, self.z_dim, 1, stride=1, padding=0, bias=True) def forward(self, input_0): primals_2 = self.conv_1.weight primals_3 = self.conv_1.bias primals_4 = self.conv_2.weight primals_5 = self.conv_2.bias primals_6 = self.conv_3.weight primals_7 = self.conv_3.bias primals_8 = self.conv_4.weight primals_9 = self.conv_4.bias primals_10 = self.conv_5.weight primals_11 = self.conv_5.bias primals_12 = self.conv_6.weight primals_13 = self.conv_6.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
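A minimal sketch, assuming a CUDA device and that the Triton kernels above are in scope, of checking that the Inductor wrapper discriminatorNew matches the eager discriminator once both share weights; the comparison harness is my addition, not part of the repo:

import torch

torch.manual_seed(0)
eager = discriminator(d_dim=4, z_dim=4).cuda()
compiled = discriminatorNew(d_dim=4, z_dim=4).cuda()
compiled.load_state_dict(eager.state_dict())  # identical conv weights/biases

voxels = torch.rand(4, 1, 64, 64, 64, device='cuda')
with torch.no_grad():
    ref = eager(voxels)
    out = compiled(voxels)
# The Triton path should agree with eager mode up to float32 rounding.
print(torch.allclose(ref, out, atol=1e-5))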
czq142857/DECOR-GAN
discriminator
false
15107
[ "MIT" ]
55
79c80fc202b8af982989a3e3bb3afe85e606b71f
https://github.com/czq142857/DECOR-GAN/tree/79c80fc202b8af982989a3e3bb3afe85e606b71f
EntmaxBisect
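The graph fragment below fully unrolls a bisection search for 1.5-entmax: the input is scaled by alpha - 1 = 0.5 (the mul node), the bracket starts at tau_lo = max - 1.0 and tau_hi = max - 0.5 (the two full.default constants; (1/d)**(alpha-1) == 0.5 for d = 4), and every step halves dm, forms p_m = clamp(X - tau_m, 0) ** 2, and keeps the half-interval whose residual sum(p_m) - 1 has the same sign as f_lo (the mul_*/where_* pairs). As a hedged reference, a rolled-up sketch under those assumptions; the helper name, the default iteration count, and the closing normalization follow the usual entmax-bisect recipe rather than anything visible in the truncated fragment:

import torch

def entmax15_bisect_reference(X: torch.Tensor, n_iter: int = 50) -> torch.Tensor:
    # Bisection over tau so that sum(clamp(X - tau, 0) ** 2) == 1,
    # i.e. entmax with alpha = 1.5 along the last dimension.
    X = X * 0.5                                   # X * (alpha - 1)
    max_val = X.max(dim=-1, keepdim=True).values
    tau_lo = max_val - 1.0                        # the pow_1 constant in the graph
    tau_hi = max_val - 0.5                        # the pow_2 constant in the graph
    f_lo = (torch.clamp(X - tau_lo, min=0) ** 2).sum(-1) - 1
    dm = tau_hi - tau_lo
    for _ in range(n_iter):                       # unrolled in the compiled graph
        dm = dm / 2
        tau_m = tau_lo + dm
        p_m = torch.clamp(X - tau_m, min=0) ** 2
        f_m = p_m.sum(-1) - 1
        # Keep tau_m where f_m and f_lo share a sign (the mul_*/where_* nodes).
        tau_lo = torch.where((f_m * f_lo >= 0).unsqueeze(-1), tau_m, tau_lo)
    return p_m / p_m.sum(dim=-1, keepdim=True)    # final renormalization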
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/jl/cjlewkazu5swf57ve2vqouzokbgz2nyjl3wq5slxk6ufxvzdg2y4.py # Topologically Sorted Source Nodes: [sub, X, max_1, pow_2, tau_hi, pow_1, tau_lo, dm, dm_1, tau_m, sub_9, clamp_1, truediv_1, p_m, sum_2, sub_5, clamp, truediv, pow_3, sum_1, f_lo, tau_lo_1, dm_2, tau_m_1, sub_12, clamp_2, truediv_2, p_m_1, sum_3, f_m_1, mul_2, tau_lo_2, dm_3, tau_m_2, sub_15, clamp_3, truediv_3, p_m_2, sum_4, f_m_2, mul_3, tau_lo_3, dm_4, tau_m_3, sub_18, clamp_4, truediv_4, p_m_3, sum_5, f_m_3, mul_4, tau_lo_4, dm_5, tau_m_4, sub_21, clamp_5, truediv_5, p_m_4, sum_6, f_m_4, mul_5, tau_lo_5, dm_6, tau_m_5, sub_24, clamp_6, truediv_6, p_m_5, sum_7, f_m_5, mul_6, tau_lo_6, dm_7, tau_m_6, sub_27, clamp_7, truediv_7, p_m_6, sum_8, f_m_6, mul_7, tau_lo_7, dm_8, tau_m_7, sub_30, clamp_8, truediv_8, p_m_7, sum_9, f_m_7, mul_8, tau_lo_8, dm_9, tau_m_8, sub_33, clamp_9, truediv_9, p_m_8, sum_10, f_m_8, mul_9, tau_lo_9, dm_10, tau_m_9, sub_36, clamp_10, truediv_10, p_m_9, sum_11, f_m_9, mul_10, tau_lo_10, dm_11, tau_m_10, sub_39, clamp_11, truediv_11, p_m_10, sum_12, f_m_10, mul_11, tau_lo_11, dm_12, tau_m_11, sub_42, clamp_12, truediv_12, p_m_11, sum_13, f_m_11, mul_12, tau_lo_12, dm_13, tau_m_12, sub_45, clamp_13, truediv_13, p_m_12, sum_14, f_m_12, mul_13, tau_lo_13, dm_14, tau_m_13, sub_48, clamp_14, truediv_14, p_m_13, sum_15, f_m_13, mul_14, tau_lo_14, dm_15, tau_m_14, sub_51, clamp_15, truediv_15, p_m_14, sum_16, tau_lo_15, dm_16, tau_m_15, sub_54, clamp_16, truediv_16, p_m_15, sum_17, f_m_15, tau_lo_16, dm_17, tau_m_16, sub_57, clamp_17, truediv_17, p_m_16, sum_18, tau_lo_17, dm_18, tau_m_17, sub_60, clamp_18, truediv_18, p_m_17, sum_19, tau_lo_18, dm_19, tau_m_18, sub_63, clamp_19, truediv_19, p_m_18, sum_20, tau_lo_19, dm_20, tau_m_19, sub_66, clamp_20, truediv_20, p_m_19, sum_21, tau_lo_20, dm_21, tau_m_20, sub_69, clamp_21, truediv_21, p_m_20, sum_22, tau_lo_21, dm_22, tau_m_21, sub_72, clamp_22, truediv_22, p_m_21, sum_23, tau_lo_22, dm_23, tau_m_22, sub_75, clamp_23, truediv_23, p_m_22, sum_24, tau_lo_23, dm_24, tau_m_23, sub_78, clamp_24, truediv_24, p_m_23, sum_25, tau_lo_24, dm_25, tau_m_24, sub_81, clamp_25, truediv_25, p_m_24, sum_26, tau_lo_25, dm_26, tau_m_25, sub_84, clamp_26, truediv_26, p_m_25, 
sum_27], Original ATen: [aten.sub, aten.mul, aten.max, aten.pow, aten.div, aten.add, aten.clamp, aten.sum, aten.where] # Source node to ATen node mapping: # X => mul # clamp => clamp_min # clamp_1 => clamp_min_1 # clamp_10 => clamp_min_10 # clamp_11 => clamp_min_11 # clamp_12 => clamp_min_12 # clamp_13 => clamp_min_13 # clamp_14 => clamp_min_14 # clamp_15 => clamp_min_15 # clamp_16 => clamp_min_16 # clamp_17 => clamp_min_17 # clamp_18 => clamp_min_18 # clamp_19 => clamp_min_19 # clamp_2 => clamp_min_2 # clamp_20 => clamp_min_20 # clamp_21 => clamp_min_21 # clamp_22 => clamp_min_22 # clamp_23 => clamp_min_23 # clamp_24 => clamp_min_24 # clamp_25 => clamp_min_25 # clamp_26 => clamp_min_26 # clamp_3 => clamp_min_3 # clamp_4 => clamp_min_4 # clamp_5 => clamp_min_5 # clamp_6 => clamp_min_6 # clamp_7 => clamp_min_7 # clamp_8 => clamp_min_8 # clamp_9 => clamp_min_9 # dm => sub_8 # dm_1 => div # dm_10 => div_9 # dm_11 => div_10 # dm_12 => div_11 # dm_13 => div_12 # dm_14 => div_13 # dm_15 => div_14 # dm_16 => div_15 # dm_17 => div_16 # dm_18 => div_17 # dm_19 => div_18 # dm_2 => div_1 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_3 => div_2 # dm_4 => div_3 # dm_5 => div_4 # dm_6 => div_5 # dm_7 => div_6 # dm_8 => div_7 # dm_9 => div_8 # f_lo => sub_7 # f_m_1 => sub_14 # f_m_10 => sub_41 # f_m_11 => sub_44 # f_m_12 => sub_47 # f_m_13 => sub_50 # f_m_15 => sub_56 # f_m_2 => sub_17 # f_m_3 => sub_20 # f_m_4 => sub_23 # f_m_5 => sub_26 # f_m_6 => sub_29 # f_m_7 => sub_32 # f_m_8 => sub_35 # f_m_9 => sub_38 # max_1 => max_1 # mul_10 => mul_21 # mul_11 => mul_23 # mul_12 => mul_25 # mul_13 => mul_27 # mul_14 => mul_29 # mul_2 => mul_5 # mul_3 => mul_7 # mul_4 => mul_9 # mul_5 => mul_11 # mul_6 => mul_13 # mul_7 => mul_15 # mul_8 => mul_17 # mul_9 => mul_19 # p_m => pow_4 # p_m_1 => pow_5 # p_m_10 => pow_14 # p_m_11 => pow_15 # p_m_12 => pow_16 # p_m_13 => pow_17 # p_m_14 => pow_18 # p_m_15 => pow_19 # p_m_16 => pow_20 # p_m_17 => pow_21 # p_m_18 => pow_22 # p_m_19 => pow_23 # p_m_2 => pow_6 # p_m_20 => pow_24 # p_m_21 => pow_25 # p_m_22 => pow_26 # p_m_23 => pow_27 # p_m_24 => pow_28 # p_m_25 => pow_29 # p_m_3 => pow_7 # p_m_4 => pow_8 # p_m_5 => pow_9 # p_m_6 => pow_10 # p_m_7 => pow_11 # p_m_8 => pow_12 # p_m_9 => pow_13 # pow_1 => full_default_1 # pow_2 => full_default_2 # pow_3 => pow_3 # sub => full_default # sub_12 => sub_12 # sub_15 => sub_15 # sub_18 => sub_18 # sub_21 => sub_21 # sub_24 => sub_24 # sub_27 => sub_27 # sub_30 => sub_30 # sub_33 => sub_33 # sub_36 => sub_36 # sub_39 => sub_39 # sub_42 => sub_42 # sub_45 => sub_45 # sub_48 => sub_48 # sub_5 => sub_5 # sub_51 => sub_51 # sub_54 => sub_54 # sub_57 => sub_57 # sub_60 => sub_60 # sub_63 => sub_63 # sub_66 => sub_66 # sub_69 => sub_69 # sub_72 => sub_72 # sub_75 => sub_75 # sub_78 => sub_78 # sub_81 => sub_81 # sub_84 => sub_84 # sub_9 => sub_9 # sum_1 => sum_1 # sum_10 => sum_10 # sum_11 => sum_11 # sum_12 => sum_12 # sum_13 => sum_13 # sum_14 => sum_14 # sum_15 => sum_15 # sum_16 => sum_16 # sum_17 => sum_17 # sum_18 => sum_18 # sum_19 => sum_19 # sum_2 => sum_2 # sum_20 => sum_20 # sum_21 => sum_21 # sum_22 => sum_22 # sum_23 => sum_23 # sum_24 => sum_24 # sum_25 => sum_25 # sum_26 => sum_26 # sum_27 => sum_27 # sum_3 => sum_3 # sum_4 => sum_4 # sum_5 => sum_5 # sum_6 => sum_6 # sum_7 => sum_7 # sum_8 => sum_8 # sum_9 => sum_9 # tau_hi => sub_4 # tau_lo => sub_2 # tau_lo_1 => where # tau_lo_10 => where_9 # tau_lo_11 => where_10 # tau_lo_12 => 
where_11 # tau_lo_13 => where_12 # tau_lo_14 => where_13 # tau_lo_15 => where_14 # tau_lo_16 => where_15 # tau_lo_17 => where_16 # tau_lo_18 => where_17 # tau_lo_19 => where_18 # tau_lo_2 => where_1 # tau_lo_20 => where_19 # tau_lo_21 => where_20 # tau_lo_22 => where_21 # tau_lo_23 => where_22 # tau_lo_24 => where_23 # tau_lo_25 => where_24 # tau_lo_3 => where_2 # tau_lo_4 => where_3 # tau_lo_5 => where_4 # tau_lo_6 => where_5 # tau_lo_7 => where_6 # tau_lo_8 => where_7 # tau_lo_9 => where_8 # tau_m => add # tau_m_1 => add_1 # tau_m_10 => add_10 # tau_m_11 => add_11 # tau_m_12 => add_12 # tau_m_13 => add_13 # tau_m_14 => add_14 # tau_m_15 => add_15 # tau_m_16 => add_16 # tau_m_17 => add_17 # tau_m_18 => add_18 # tau_m_19 => add_19 # tau_m_2 => add_2 # tau_m_20 => add_20 # tau_m_21 => add_21 # tau_m_22 => add_22 # tau_m_23 => add_23 # tau_m_24 => add_24 # tau_m_25 => add_25 # tau_m_3 => add_3 # tau_m_4 => add_4 # tau_m_5 => add_5 # tau_m_6 => add_6 # tau_m_7 => add_7 # tau_m_8 => add_8 # tau_m_9 => add_9 # truediv => full_default_3 # truediv_1 => full_default_4 # truediv_10 => full_default_13 # truediv_11 => full_default_14 # truediv_12 => full_default_15 # truediv_13 => full_default_16 # truediv_14 => full_default_17 # truediv_15 => full_default_18 # truediv_16 => full_default_19 # truediv_17 => full_default_20 # truediv_18 => full_default_21 # truediv_19 => full_default_22 # truediv_2 => full_default_5 # truediv_20 => full_default_23 # truediv_21 => full_default_24 # truediv_22 => full_default_25 # truediv_23 => full_default_26 # truediv_24 => full_default_27 # truediv_25 => full_default_28 # truediv_26 => full_default_29 # truediv_3 => full_default_6 # truediv_4 => full_default_7 # truediv_5 => full_default_8 # truediv_6 => full_default_9 # truediv_7 => full_default_10 # truediv_8 => full_default_11 # truediv_9 => full_default_12 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%mul, -1, True), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, %full_default_2), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %sub_2 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, %full_default_1), kwargs = {}) # %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_4, %sub_2), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_8, 2), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_2, %div), kwargs = {}) # %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_9, 0), kwargs = {}) # %full_default_4 : 
[num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_1, %full_default_4), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_4, [-1]), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_2), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0), kwargs = {}) # %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min, %full_default_3), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [-1]), kwargs = {}) # %sub_7 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze, %add, %sub_2), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div, 2), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, %div_1), kwargs = {}) # %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_1), kwargs = {}) # %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_12, 0), kwargs = {}) # %full_default_5 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_2, %full_default_5), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [-1]), kwargs = {}) # %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_3, 1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %sub_7), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_1, %add_1, %where), kwargs = {}) # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_1, 2), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_1, %div_2), kwargs = {}) # %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_2), kwargs = {}) # %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_15, 0), kwargs = {}) # %full_default_6 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_3, %full_default_6), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_6, [-1]), kwargs = {}) # %sub_17 : [num_users=1] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_4, 1), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, %sub_7), kwargs = {}) # %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_2, %add_2, %where_1), kwargs = {}) # %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_2, 2), kwargs = {}) # %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_2, %div_3), kwargs = {}) # %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_3), kwargs = {}) # %clamp_min_4 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_18, 0), kwargs = {}) # %full_default_7 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_4, %full_default_7), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_7, [-1]), kwargs = {}) # %sub_20 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_5, 1), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_20, %sub_7), kwargs = {}) # %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_3, %add_3, %where_2), kwargs = {}) # %div_4 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_3, 2), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_3, %div_4), kwargs = {}) # %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_4), kwargs = {}) # %clamp_min_5 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_21, 0), kwargs = {}) # %full_default_8 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_5, %full_default_8), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_8, [-1]), kwargs = {}) # %sub_23 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_6, 1), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_23, %sub_7), kwargs = {}) # %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_4, %add_4, %where_3), kwargs = {}) # %div_5 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_4, 2), kwargs = {}) # %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_4, %div_5), kwargs = {}) # %sub_24 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_5), kwargs = {}) # %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_24, 0), kwargs = {}) # %full_default_9 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_9 : [num_users=1] = 
call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_6, %full_default_9), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_9, [-1]), kwargs = {}) # %sub_26 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_7, 1), kwargs = {}) # %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_26, %sub_7), kwargs = {}) # %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_5, %add_5, %where_4), kwargs = {}) # %div_6 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_5, 2), kwargs = {}) # %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_5, %div_6), kwargs = {}) # %sub_27 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_6), kwargs = {}) # %clamp_min_7 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_27, 0), kwargs = {}) # %full_default_10 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_10 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_7, %full_default_10), kwargs = {}) # %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_10, [-1]), kwargs = {}) # %sub_29 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_8, 1), kwargs = {}) # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_29, %sub_7), kwargs = {}) # %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_6, %add_6, %where_5), kwargs = {}) # %div_7 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_6, 2), kwargs = {}) # %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_6, %div_7), kwargs = {}) # %sub_30 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_7), kwargs = {}) # %clamp_min_8 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_30, 0), kwargs = {}) # %full_default_11 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_11 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_8, %full_default_11), kwargs = {}) # %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_11, [-1]), kwargs = {}) # %sub_32 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_9, 1), kwargs = {}) # %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_32, %sub_7), kwargs = {}) # %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_7, %add_7, %where_6), kwargs = {}) # %div_8 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_7, 2), kwargs = {}) # %add_8 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_7, %div_8), kwargs = {}) # %sub_33 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_8), kwargs = {}) # %clamp_min_9 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = 
(%sub_33, 0), kwargs = {}) # %full_default_12 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_12 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_9, %full_default_12), kwargs = {}) # %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_12, [-1]), kwargs = {}) # %sub_35 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_10, 1), kwargs = {}) # %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_35, %sub_7), kwargs = {}) # %where_8 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_8, %add_8, %where_7), kwargs = {}) # %div_9 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_8, 2), kwargs = {}) # %add_9 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_8, %div_9), kwargs = {}) # %sub_36 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_9), kwargs = {}) # %clamp_min_10 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_36, 0), kwargs = {}) # %full_default_13 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_13 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_10, %full_default_13), kwargs = {}) # %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_13, [-1]), kwargs = {}) # %sub_38 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_11, 1), kwargs = {}) # %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_38, %sub_7), kwargs = {}) # %where_9 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_9, %add_9, %where_8), kwargs = {}) # %div_10 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_9, 2), kwargs = {}) # %add_10 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_9, %div_10), kwargs = {}) # %sub_39 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_10), kwargs = {}) # %clamp_min_11 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_39, 0), kwargs = {}) # %full_default_14 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_14 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_11, %full_default_14), kwargs = {}) # %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_14, [-1]), kwargs = {}) # %sub_41 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_12, 1), kwargs = {}) # %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_41, %sub_7), kwargs = {}) # %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_10, %add_10, %where_9), kwargs = {}) # %div_11 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_10, 2), kwargs = {}) # %add_11 : [num_users=2] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%where_10, %div_11), kwargs = {}) # %sub_42 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_11), kwargs = {}) # %clamp_min_12 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_42, 0), kwargs = {}) # %full_default_15 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_15 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_12, %full_default_15), kwargs = {}) # %sum_13 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_15, [-1]), kwargs = {}) # %sub_44 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_13, 1), kwargs = {}) # %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_44, %sub_7), kwargs = {}) # %where_11 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_11, %add_11, %where_10), kwargs = {}) # %div_12 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_11, 2), kwargs = {}) # %add_12 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_11, %div_12), kwargs = {}) # %sub_45 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_12), kwargs = {}) # %clamp_min_13 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_45, 0), kwargs = {}) # %full_default_16 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_16 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_13, %full_default_16), kwargs = {}) # %sum_14 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_16, [-1]), kwargs = {}) # %sub_47 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_14, 1), kwargs = {}) # %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_47, %sub_7), kwargs = {}) # %where_12 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_12, %add_12, %where_11), kwargs = {}) # %div_13 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_12, 2), kwargs = {}) # %add_13 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_12, %div_13), kwargs = {}) # %sub_48 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_13), kwargs = {}) # %clamp_min_14 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_48, 0), kwargs = {}) # %full_default_17 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_17 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_14, %full_default_17), kwargs = {}) # %sum_15 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_17, [-1]), kwargs = {}) # %sub_50 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_15, 1), kwargs = {}) # %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = 
(%sub_50, %sub_7), kwargs = {}) # %where_13 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_13, %add_13, %where_12), kwargs = {}) # %div_14 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_13, 2), kwargs = {}) # %add_14 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_13, %div_14), kwargs = {}) # %sub_51 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_14), kwargs = {}) # %clamp_min_15 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_51, 0), kwargs = {}) # %full_default_18 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_18 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_15, %full_default_18), kwargs = {}) # %sum_16 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_18, [-1]), kwargs = {}) # %where_14 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_14, %add_14, %where_13), kwargs = {}) # %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {}) # %add_15 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_14, %div_15), kwargs = {}) # %sub_54 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_15), kwargs = {}) # %clamp_min_16 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_54, 0), kwargs = {}) # %full_default_19 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_19 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_16, %full_default_19), kwargs = {}) # %sum_17 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_19, [-1]), kwargs = {}) # %sub_56 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_17, 1), kwargs = {}) # %where_15 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_15, %add_15, %where_14), kwargs = {}) # %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {}) # %add_16 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_15, %div_16), kwargs = {}) # %sub_57 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_16), kwargs = {}) # %clamp_min_17 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_57, 0), kwargs = {}) # %full_default_20 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_20 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_17, %full_default_20), kwargs = {}) # %sum_18 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_20, [-1]), kwargs = {}) # %where_16 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_16, %add_16, %where_15), kwargs = {}) # %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 
2), kwargs = {}) # %add_17 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_16, %div_17), kwargs = {}) # %sub_60 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_17), kwargs = {}) # %clamp_min_18 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_60, 0), kwargs = {}) # %full_default_21 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_21 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_18, %full_default_21), kwargs = {}) # %sum_19 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_21, [-1]), kwargs = {}) # %where_17 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_17, %add_17, %where_16), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %add_18 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_17, %div_18), kwargs = {}) # %sub_63 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_18), kwargs = {}) # %clamp_min_19 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_63, 0), kwargs = {}) # %full_default_22 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_22 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_19, %full_default_22), kwargs = {}) # %sum_20 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_22, [-1]), kwargs = {}) # %where_18 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_18, %add_18, %where_17), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %add_19 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_18, %div_19), kwargs = {}) # %sub_66 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_19), kwargs = {}) # %clamp_min_20 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_66, 0), kwargs = {}) # %full_default_23 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_23 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_20, %full_default_23), kwargs = {}) # %sum_21 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_23, [-1]), kwargs = {}) # %where_19 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_19, %add_19, %where_18), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %add_20 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_19, %div_20), kwargs = {}) # %sub_69 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_20), kwargs = {}) # %clamp_min_21 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_69, 0), kwargs = {}) 
# %full_default_24 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_24 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_21, %full_default_24), kwargs = {}) # %sum_22 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_24, [-1]), kwargs = {}) # %where_20 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_20, %add_20, %where_19), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %add_21 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_20, %div_21), kwargs = {}) # %sub_72 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_21), kwargs = {}) # %clamp_min_22 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_72, 0), kwargs = {}) # %full_default_25 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_25 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_22, %full_default_25), kwargs = {}) # %sum_23 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_25, [-1]), kwargs = {}) # %where_21 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_21, %add_21, %where_20), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %add_22 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_21, %div_22), kwargs = {}) # %sub_75 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_22), kwargs = {}) # %clamp_min_23 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_75, 0), kwargs = {}) # %full_default_26 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_26 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_23, %full_default_26), kwargs = {}) # %sum_24 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_26, [-1]), kwargs = {}) # %where_22 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_22, %add_22, %where_21), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %add_23 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_22, %div_23), kwargs = {}) # %sub_78 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_23), kwargs = {}) # %clamp_min_24 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_78, 0), kwargs = {}) # %full_default_27 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_27 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_24, %full_default_27), kwargs = {}) # %sum_25 : [num_users=1] = 
call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_27, [-1]), kwargs = {}) # %where_23 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_23, %add_23, %where_22), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %add_24 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_23, %div_24), kwargs = {}) # %sub_81 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_24), kwargs = {}) # %clamp_min_25 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_81, 0), kwargs = {}) # %full_default_28 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_28 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_25, %full_default_28), kwargs = {}) # %sum_26 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_28, [-1]), kwargs = {}) # %where_24 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_24, %add_24, %where_23), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %add_25 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_24, %div_25), kwargs = {}) # %sub_84 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_25), kwargs = {}) # %clamp_min_26 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_84, 0), kwargs = {}) # %full_default_29 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_29 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_26, %full_default_29), kwargs = {}) # %sum_27 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_29, [-1]), kwargs = {}) triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0 = async_compile.triton('triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0', 'mutated_arg_names': ['in_out_ptr12'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0(in_out_ptr12, in_ptr0, out_ptr0, out_ptr19, out_ptr27, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = triton_helpers.maximum(tmp2, tmp4) tmp7 = tmp6 * tmp1 tmp8 = triton_helpers.maximum(tmp5, tmp7) tmp10 = tmp9 * tmp1 tmp11 = triton_helpers.maximum(tmp8, tmp10) tmp12 = 1.0 tmp13 = tmp11 - tmp12 tmp14 = tmp11 - tmp1 tmp15 = tmp14 - tmp13 tmp16 = tmp15 * tmp1 tmp17 = tmp13 + tmp16 tmp18 = tmp2 - tmp17 tmp19 = 0.0 tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = 2.0 tmp22 = libdevice.pow(tmp20, tmp21) tmp23 = tmp4 - tmp17 tmp24 = triton_helpers.maximum(tmp23, tmp19) tmp25 = libdevice.pow(tmp24, tmp21) tmp26 = tmp22 + tmp25 tmp27 = tmp7 - tmp17 tmp28 = triton_helpers.maximum(tmp27, tmp19) tmp29 = libdevice.pow(tmp28, tmp21) tmp30 = tmp26 + tmp29 tmp31 = tmp10 - tmp17 tmp32 = triton_helpers.maximum(tmp31, tmp19) tmp33 = libdevice.pow(tmp32, tmp21) tmp34 = tmp30 + tmp33 tmp35 = tmp2 - tmp13 tmp36 = triton_helpers.maximum(tmp35, tmp19) tmp37 = libdevice.pow(tmp36, tmp21) tmp38 = tmp4 - tmp13 tmp39 = triton_helpers.maximum(tmp38, tmp19) tmp40 = libdevice.pow(tmp39, tmp21) tmp41 = tmp37 + tmp40 tmp42 = tmp7 - tmp13 tmp43 = triton_helpers.maximum(tmp42, tmp19) tmp44 = libdevice.pow(tmp43, tmp21) tmp45 = tmp41 + tmp44 tmp46 = tmp10 - tmp13 tmp47 = triton_helpers.maximum(tmp46, tmp19) tmp48 = libdevice.pow(tmp47, tmp21) tmp49 = tmp45 + tmp48 tmp50 = tmp34 - tmp12 tmp51 = tmp49 - tmp12 tmp52 = tmp50 * tmp51 tmp53 = tmp52 >= tmp19 tmp54 = tl.where(tmp53, tmp17, tmp13) tmp55 = tmp16 * tmp1 tmp56 = tmp54 + tmp55 tmp57 = tmp2 - tmp56 tmp58 = triton_helpers.maximum(tmp57, tmp19) tmp59 = libdevice.pow(tmp58, tmp21) tmp60 = tmp4 - tmp56 tmp61 = triton_helpers.maximum(tmp60, tmp19) tmp62 = libdevice.pow(tmp61, tmp21) tmp63 = tmp59 + tmp62 tmp64 = tmp7 - tmp56 tmp65 = triton_helpers.maximum(tmp64, tmp19) tmp66 = libdevice.pow(tmp65, tmp21) tmp67 = tmp63 + tmp66 tmp68 = tmp10 - tmp56 tmp69 = triton_helpers.maximum(tmp68, tmp19) tmp70 = libdevice.pow(tmp69, tmp21) tmp71 = tmp67 + tmp70 tmp72 = tmp71 - tmp12 tmp73 = tmp72 * tmp51 tmp74 = tmp73 >= tmp19 tmp75 = tl.where(tmp74, tmp56, tmp54) tmp76 = tmp55 * tmp1 tmp77 = tmp75 + tmp76 tmp78 = tmp2 - tmp77 tmp79 = triton_helpers.maximum(tmp78, tmp19) tmp80 = libdevice.pow(tmp79, tmp21) tmp81 = tmp4 - tmp77 tmp82 = triton_helpers.maximum(tmp81, tmp19) tmp83 = libdevice.pow(tmp82, tmp21) tmp84 = tmp80 + tmp83 tmp85 = tmp7 - tmp77 tmp86 = triton_helpers.maximum(tmp85, tmp19) tmp87 = libdevice.pow(tmp86, tmp21) tmp88 = tmp84 + tmp87 tmp89 = tmp10 - tmp77 tmp90 = triton_helpers.maximum(tmp89, tmp19) tmp91 = libdevice.pow(tmp90, tmp21) tmp92 = tmp88 + tmp91 tmp93 = tmp92 - tmp12 tmp94 = tmp93 * tmp51 tmp95 = tmp94 >= tmp19 tmp96 = tl.where(tmp95, tmp77, tmp75) tmp97 = 
tmp76 * tmp1 tmp98 = tmp96 + tmp97 tmp99 = tmp2 - tmp98 tmp100 = triton_helpers.maximum(tmp99, tmp19) tmp101 = libdevice.pow(tmp100, tmp21) tmp102 = tmp4 - tmp98 tmp103 = triton_helpers.maximum(tmp102, tmp19) tmp104 = libdevice.pow(tmp103, tmp21) tmp105 = tmp101 + tmp104 tmp106 = tmp7 - tmp98 tmp107 = triton_helpers.maximum(tmp106, tmp19) tmp108 = libdevice.pow(tmp107, tmp21) tmp109 = tmp105 + tmp108 tmp110 = tmp10 - tmp98 tmp111 = triton_helpers.maximum(tmp110, tmp19) tmp112 = libdevice.pow(tmp111, tmp21) tmp113 = tmp109 + tmp112 tmp114 = tmp113 - tmp12 tmp115 = tmp114 * tmp51 tmp116 = tmp115 >= tmp19 tmp117 = tl.where(tmp116, tmp98, tmp96) tmp118 = tmp97 * tmp1 tmp119 = tmp117 + tmp118 tmp120 = tmp2 - tmp119 tmp121 = triton_helpers.maximum(tmp120, tmp19) tmp122 = libdevice.pow(tmp121, tmp21) tmp123 = tmp4 - tmp119 tmp124 = triton_helpers.maximum(tmp123, tmp19) tmp125 = libdevice.pow(tmp124, tmp21) tmp126 = tmp122 + tmp125 tmp127 = tmp7 - tmp119 tmp128 = triton_helpers.maximum(tmp127, tmp19) tmp129 = libdevice.pow(tmp128, tmp21) tmp130 = tmp126 + tmp129 tmp131 = tmp10 - tmp119 tmp132 = triton_helpers.maximum(tmp131, tmp19) tmp133 = libdevice.pow(tmp132, tmp21) tmp134 = tmp130 + tmp133 tmp135 = tmp134 - tmp12 tmp136 = tmp135 * tmp51 tmp137 = tmp136 >= tmp19 tmp138 = tl.where(tmp137, tmp119, tmp117) tmp139 = tmp118 * tmp1 tmp140 = tmp138 + tmp139 tmp141 = tmp2 - tmp140 tmp142 = triton_helpers.maximum(tmp141, tmp19) tmp143 = libdevice.pow(tmp142, tmp21) tmp144 = tmp4 - tmp140 tmp145 = triton_helpers.maximum(tmp144, tmp19) tmp146 = libdevice.pow(tmp145, tmp21) tmp147 = tmp143 + tmp146 tmp148 = tmp7 - tmp140 tmp149 = triton_helpers.maximum(tmp148, tmp19) tmp150 = libdevice.pow(tmp149, tmp21) tmp151 = tmp147 + tmp150 tmp152 = tmp10 - tmp140 tmp153 = triton_helpers.maximum(tmp152, tmp19) tmp154 = libdevice.pow(tmp153, tmp21) tmp155 = tmp151 + tmp154 tmp156 = tmp155 - tmp12 tmp157 = tmp156 * tmp51 tmp158 = tmp157 >= tmp19 tmp159 = tl.where(tmp158, tmp140, tmp138) tmp160 = tmp139 * tmp1 tmp161 = tmp159 + tmp160 tmp162 = tmp2 - tmp161 tmp163 = triton_helpers.maximum(tmp162, tmp19) tmp164 = libdevice.pow(tmp163, tmp21) tmp165 = tmp4 - tmp161 tmp166 = triton_helpers.maximum(tmp165, tmp19) tmp167 = libdevice.pow(tmp166, tmp21) tmp168 = tmp164 + tmp167 tmp169 = tmp7 - tmp161 tmp170 = triton_helpers.maximum(tmp169, tmp19) tmp171 = libdevice.pow(tmp170, tmp21) tmp172 = tmp168 + tmp171 tmp173 = tmp10 - tmp161 tmp174 = triton_helpers.maximum(tmp173, tmp19) tmp175 = libdevice.pow(tmp174, tmp21) tmp176 = tmp172 + tmp175 tmp177 = tmp176 - tmp12 tmp178 = tmp177 * tmp51 tmp179 = tmp178 >= tmp19 tmp180 = tl.where(tmp179, tmp161, tmp159) tmp181 = tmp160 * tmp1 tmp182 = tmp180 + tmp181 tmp183 = tmp2 - tmp182 tmp184 = triton_helpers.maximum(tmp183, tmp19) tmp185 = libdevice.pow(tmp184, tmp21) tmp186 = tmp4 - tmp182 tmp187 = triton_helpers.maximum(tmp186, tmp19) tmp188 = libdevice.pow(tmp187, tmp21) tmp189 = tmp185 + tmp188 tmp190 = tmp7 - tmp182 tmp191 = triton_helpers.maximum(tmp190, tmp19) tmp192 = libdevice.pow(tmp191, tmp21) tmp193 = tmp189 + tmp192 tmp194 = tmp10 - tmp182 tmp195 = triton_helpers.maximum(tmp194, tmp19) tmp196 = libdevice.pow(tmp195, tmp21) tmp197 = tmp193 + tmp196 tmp198 = tmp197 - tmp12 tmp199 = tmp198 * tmp51 tmp200 = tmp199 >= tmp19 tmp201 = tl.where(tmp200, tmp182, tmp180) tmp202 = tmp181 * tmp1 tmp203 = tmp201 + tmp202 tmp204 = tmp2 - tmp203 tmp205 = triton_helpers.maximum(tmp204, tmp19) tmp206 = libdevice.pow(tmp205, tmp21) tmp207 = tmp4 - tmp203 tmp208 = triton_helpers.maximum(tmp207, 
tmp19) tmp209 = libdevice.pow(tmp208, tmp21) tmp210 = tmp206 + tmp209 tmp211 = tmp7 - tmp203 tmp212 = triton_helpers.maximum(tmp211, tmp19) tmp213 = libdevice.pow(tmp212, tmp21) tmp214 = tmp210 + tmp213 tmp215 = tmp10 - tmp203 tmp216 = triton_helpers.maximum(tmp215, tmp19) tmp217 = libdevice.pow(tmp216, tmp21) tmp218 = tmp214 + tmp217 tmp219 = tmp218 - tmp12 tmp220 = tmp219 * tmp51 tmp221 = tmp220 >= tmp19 tmp222 = tl.where(tmp221, tmp203, tmp201) tmp223 = tmp202 * tmp1 tmp224 = tmp222 + tmp223 tmp225 = tmp2 - tmp224 tmp226 = triton_helpers.maximum(tmp225, tmp19) tmp227 = libdevice.pow(tmp226, tmp21) tmp228 = tmp4 - tmp224 tmp229 = triton_helpers.maximum(tmp228, tmp19) tmp230 = libdevice.pow(tmp229, tmp21) tmp231 = tmp227 + tmp230 tmp232 = tmp7 - tmp224 tmp233 = triton_helpers.maximum(tmp232, tmp19) tmp234 = libdevice.pow(tmp233, tmp21) tmp235 = tmp231 + tmp234 tmp236 = tmp10 - tmp224 tmp237 = triton_helpers.maximum(tmp236, tmp19) tmp238 = libdevice.pow(tmp237, tmp21) tmp239 = tmp235 + tmp238 tmp240 = tmp239 - tmp12 tmp241 = tmp240 * tmp51 tmp242 = tmp241 >= tmp19 tmp243 = tl.where(tmp242, tmp224, tmp222) tmp244 = tmp223 * tmp1 tmp245 = tmp243 + tmp244 tmp246 = tmp2 - tmp245 tmp247 = triton_helpers.maximum(tmp246, tmp19) tmp248 = libdevice.pow(tmp247, tmp21) tmp249 = tmp4 - tmp245 tmp250 = triton_helpers.maximum(tmp249, tmp19) tmp251 = libdevice.pow(tmp250, tmp21) tmp252 = tmp248 + tmp251 tmp253 = tmp7 - tmp245 tmp254 = triton_helpers.maximum(tmp253, tmp19) tmp255 = libdevice.pow(tmp254, tmp21) tmp256 = tmp252 + tmp255 tmp257 = tmp10 - tmp245 tmp258 = triton_helpers.maximum(tmp257, tmp19) tmp259 = libdevice.pow(tmp258, tmp21) tmp260 = tmp256 + tmp259 tmp261 = tmp260 - tmp12 tmp262 = tmp261 * tmp51 tmp263 = tmp262 >= tmp19 tmp264 = tl.where(tmp263, tmp245, tmp243) tmp265 = tmp244 * tmp1 tmp266 = tmp264 + tmp265 tmp267 = tmp2 - tmp266 tmp268 = triton_helpers.maximum(tmp267, tmp19) tmp269 = libdevice.pow(tmp268, tmp21) tmp270 = tmp4 - tmp266 tmp271 = triton_helpers.maximum(tmp270, tmp19) tmp272 = libdevice.pow(tmp271, tmp21) tmp273 = tmp269 + tmp272 tmp274 = tmp7 - tmp266 tmp275 = triton_helpers.maximum(tmp274, tmp19) tmp276 = libdevice.pow(tmp275, tmp21) tmp277 = tmp273 + tmp276 tmp278 = tmp10 - tmp266 tmp279 = triton_helpers.maximum(tmp278, tmp19) tmp280 = libdevice.pow(tmp279, tmp21) tmp281 = tmp277 + tmp280 tmp282 = tmp281 - tmp12 tmp283 = tmp282 * tmp51 tmp284 = tmp283 >= tmp19 tmp285 = tl.where(tmp284, tmp266, tmp264) tmp286 = tmp265 * tmp1 tmp287 = tmp285 + tmp286 tmp288 = tmp2 - tmp287 tmp289 = triton_helpers.maximum(tmp288, tmp19) tmp290 = libdevice.pow(tmp289, tmp21) tmp291 = tmp4 - tmp287 tmp292 = triton_helpers.maximum(tmp291, tmp19) tmp293 = libdevice.pow(tmp292, tmp21) tmp294 = tmp290 + tmp293 tmp295 = tmp7 - tmp287 tmp296 = triton_helpers.maximum(tmp295, tmp19) tmp297 = libdevice.pow(tmp296, tmp21) tmp298 = tmp294 + tmp297 tmp299 = tmp10 - tmp287 tmp300 = triton_helpers.maximum(tmp299, tmp19) tmp301 = libdevice.pow(tmp300, tmp21) tmp302 = tmp298 + tmp301 tmp303 = tmp302 - tmp12 tmp304 = tmp303 * tmp51 tmp305 = tmp304 >= tmp19 tmp306 = tl.where(tmp305, tmp287, tmp285) tmp307 = tmp286 * tmp1 tmp308 = tmp307 * tmp1 tmp309 = tmp306 + tmp307 tmp310 = tmp2 - tmp309 tmp311 = triton_helpers.maximum(tmp310, tmp19) tmp312 = libdevice.pow(tmp311, tmp21) tmp313 = tmp4 - tmp309 tmp314 = triton_helpers.maximum(tmp313, tmp19) tmp315 = libdevice.pow(tmp314, tmp21) tmp316 = tmp312 + tmp315 tmp317 = tmp7 - tmp309 tmp318 = triton_helpers.maximum(tmp317, tmp19) tmp319 = libdevice.pow(tmp318, 
tmp21) tmp320 = tmp316 + tmp319 tmp321 = tmp10 - tmp309 tmp322 = triton_helpers.maximum(tmp321, tmp19) tmp323 = libdevice.pow(tmp322, tmp21) tmp324 = tmp320 + tmp323 tmp325 = tmp324 - tmp12 tmp326 = tmp325 * tmp51 tmp327 = tmp326 >= tmp19 tmp328 = tl.where(tmp327, tmp309, tmp306) tmp329 = tmp328 + tmp308 tmp330 = tmp2 - tmp329 tmp331 = triton_helpers.maximum(tmp330, tmp19) tmp332 = libdevice.pow(tmp331, tmp21) tmp333 = tmp4 - tmp329 tmp334 = triton_helpers.maximum(tmp333, tmp19) tmp335 = libdevice.pow(tmp334, tmp21) tmp336 = tmp332 + tmp335 tmp337 = tmp7 - tmp329 tmp338 = triton_helpers.maximum(tmp337, tmp19) tmp339 = libdevice.pow(tmp338, tmp21) tmp340 = tmp336 + tmp339 tmp341 = tmp10 - tmp329 tmp342 = triton_helpers.maximum(tmp341, tmp19) tmp343 = libdevice.pow(tmp342, tmp21) tmp344 = tmp340 + tmp343 tmp345 = tmp344 - tmp12 tmp346 = tmp345 * tmp51 tmp347 = tmp346 >= tmp19 tmp348 = tl.where(tmp347, tmp329, tmp328) tmp349 = tmp308 * tmp1 tmp350 = tmp348 + tmp349 tmp351 = tmp2 - tmp350 tmp352 = triton_helpers.maximum(tmp351, tmp19) tmp353 = libdevice.pow(tmp352, tmp21) tmp354 = tmp4 - tmp350 tmp355 = triton_helpers.maximum(tmp354, tmp19) tmp356 = libdevice.pow(tmp355, tmp21) tmp357 = tmp353 + tmp356 tmp358 = tmp7 - tmp350 tmp359 = triton_helpers.maximum(tmp358, tmp19) tmp360 = libdevice.pow(tmp359, tmp21) tmp361 = tmp357 + tmp360 tmp362 = tmp10 - tmp350 tmp363 = triton_helpers.maximum(tmp362, tmp19) tmp364 = libdevice.pow(tmp363, tmp21) tmp365 = tmp361 + tmp364 tmp366 = tmp365 - tmp12 tmp367 = tmp366 * tmp51 tmp368 = tmp367 >= tmp19 tmp369 = tl.where(tmp368, tmp350, tmp348) tmp370 = tmp349 * tmp1 tmp371 = tmp369 + tmp370 tmp372 = tmp2 - tmp371 tmp373 = triton_helpers.maximum(tmp372, tmp19) tmp374 = libdevice.pow(tmp373, tmp21) tmp375 = tmp4 - tmp371 tmp376 = triton_helpers.maximum(tmp375, tmp19) tmp377 = libdevice.pow(tmp376, tmp21) tmp378 = tmp374 + tmp377 tmp379 = tmp7 - tmp371 tmp380 = triton_helpers.maximum(tmp379, tmp19) tmp381 = libdevice.pow(tmp380, tmp21) tmp382 = tmp378 + tmp381 tmp383 = tmp10 - tmp371 tmp384 = triton_helpers.maximum(tmp383, tmp19) tmp385 = libdevice.pow(tmp384, tmp21) tmp386 = tmp382 + tmp385 tmp387 = tmp386 - tmp12 tmp388 = tmp387 * tmp51 tmp389 = tmp388 >= tmp19 tmp390 = tl.where(tmp389, tmp371, tmp369) tmp391 = tmp370 * tmp1 tmp392 = tmp390 + tmp391 tmp393 = tmp2 - tmp392 tmp394 = triton_helpers.maximum(tmp393, tmp19) tmp395 = libdevice.pow(tmp394, tmp21) tmp396 = tmp4 - tmp392 tmp397 = triton_helpers.maximum(tmp396, tmp19) tmp398 = libdevice.pow(tmp397, tmp21) tmp399 = tmp395 + tmp398 tmp400 = tmp7 - tmp392 tmp401 = triton_helpers.maximum(tmp400, tmp19) tmp402 = libdevice.pow(tmp401, tmp21) tmp403 = tmp399 + tmp402 tmp404 = tmp10 - tmp392 tmp405 = triton_helpers.maximum(tmp404, tmp19) tmp406 = libdevice.pow(tmp405, tmp21) tmp407 = tmp403 + tmp406 tmp408 = tmp407 - tmp12 tmp409 = tmp408 * tmp51 tmp410 = tmp409 >= tmp19 tmp411 = tl.where(tmp410, tmp392, tmp390) tmp412 = tmp391 * tmp1 tmp413 = tmp411 + tmp412 tmp414 = tmp2 - tmp413 tmp415 = triton_helpers.maximum(tmp414, tmp19) tmp416 = libdevice.pow(tmp415, tmp21) tmp417 = tmp4 - tmp413 tmp418 = triton_helpers.maximum(tmp417, tmp19) tmp419 = libdevice.pow(tmp418, tmp21) tmp420 = tmp416 + tmp419 tmp421 = tmp7 - tmp413 tmp422 = triton_helpers.maximum(tmp421, tmp19) tmp423 = libdevice.pow(tmp422, tmp21) tmp424 = tmp420 + tmp423 tmp425 = tmp10 - tmp413 tmp426 = triton_helpers.maximum(tmp425, tmp19) tmp427 = libdevice.pow(tmp426, tmp21) tmp428 = tmp424 + tmp427 tmp429 = tmp428 - tmp12 tmp430 = tmp429 * tmp51 tmp431 = 
tmp430 >= tmp19 tmp432 = tl.where(tmp431, tmp413, tmp411) tmp433 = tmp412 * tmp1 tmp434 = tmp432 + tmp433 tmp435 = tmp2 - tmp434 tmp436 = triton_helpers.maximum(tmp435, tmp19) tmp437 = libdevice.pow(tmp436, tmp21) tmp438 = tmp4 - tmp434 tmp439 = triton_helpers.maximum(tmp438, tmp19) tmp440 = libdevice.pow(tmp439, tmp21) tmp441 = tmp437 + tmp440 tmp442 = tmp7 - tmp434 tmp443 = triton_helpers.maximum(tmp442, tmp19) tmp444 = libdevice.pow(tmp443, tmp21) tmp445 = tmp441 + tmp444 tmp446 = tmp10 - tmp434 tmp447 = triton_helpers.maximum(tmp446, tmp19) tmp448 = libdevice.pow(tmp447, tmp21) tmp449 = tmp445 + tmp448 tmp450 = tmp449 - tmp12 tmp451 = tmp450 * tmp51 tmp452 = tmp451 >= tmp19 tmp453 = tl.where(tmp452, tmp434, tmp432) tmp454 = tmp433 * tmp1 tmp455 = tmp453 + tmp454 tmp456 = tmp2 - tmp455 tmp457 = triton_helpers.maximum(tmp456, tmp19) tmp458 = libdevice.pow(tmp457, tmp21) tmp459 = tmp4 - tmp455 tmp460 = triton_helpers.maximum(tmp459, tmp19) tmp461 = libdevice.pow(tmp460, tmp21) tmp462 = tmp458 + tmp461 tmp463 = tmp7 - tmp455 tmp464 = triton_helpers.maximum(tmp463, tmp19) tmp465 = libdevice.pow(tmp464, tmp21) tmp466 = tmp462 + tmp465 tmp467 = tmp10 - tmp455 tmp468 = triton_helpers.maximum(tmp467, tmp19) tmp469 = libdevice.pow(tmp468, tmp21) tmp470 = tmp466 + tmp469 tmp471 = tmp470 - tmp12 tmp472 = tmp471 * tmp51 tmp473 = tmp472 >= tmp19 tmp474 = tl.where(tmp473, tmp455, tmp453) tmp475 = tmp454 * tmp1 tmp476 = tmp474 + tmp475 tmp477 = tmp2 - tmp476 tmp478 = triton_helpers.maximum(tmp477, tmp19) tmp479 = libdevice.pow(tmp478, tmp21) tmp480 = tmp4 - tmp476 tmp481 = triton_helpers.maximum(tmp480, tmp19) tmp482 = libdevice.pow(tmp481, tmp21) tmp483 = tmp479 + tmp482 tmp484 = tmp7 - tmp476 tmp485 = triton_helpers.maximum(tmp484, tmp19) tmp486 = libdevice.pow(tmp485, tmp21) tmp487 = tmp483 + tmp486 tmp488 = tmp10 - tmp476 tmp489 = triton_helpers.maximum(tmp488, tmp19) tmp490 = libdevice.pow(tmp489, tmp21) tmp491 = tmp487 + tmp490 tmp492 = tmp491 - tmp12 tmp493 = tmp492 * tmp51 tmp494 = tmp493 >= tmp19 tmp495 = tl.where(tmp494, tmp476, tmp474) tmp496 = tmp475 * tmp1 tmp497 = tmp495 + tmp496 tmp498 = tmp2 - tmp497 tmp499 = triton_helpers.maximum(tmp498, tmp19) tmp500 = libdevice.pow(tmp499, tmp21) tmp501 = tmp4 - tmp497 tmp502 = triton_helpers.maximum(tmp501, tmp19) tmp503 = libdevice.pow(tmp502, tmp21) tmp504 = tmp500 + tmp503 tmp505 = tmp7 - tmp497 tmp506 = triton_helpers.maximum(tmp505, tmp19) tmp507 = libdevice.pow(tmp506, tmp21) tmp508 = tmp504 + tmp507 tmp509 = tmp10 - tmp497 tmp510 = triton_helpers.maximum(tmp509, tmp19) tmp511 = libdevice.pow(tmp510, tmp21) tmp512 = tmp508 + tmp511 tmp513 = tmp512 - tmp12 tmp514 = tmp513 * tmp51 tmp515 = tmp514 >= tmp19 tmp516 = tl.where(tmp515, tmp497, tmp495) tmp517 = tmp496 * tmp1 tmp518 = tmp516 + tmp517 tmp519 = tmp2 - tmp518 tmp520 = triton_helpers.maximum(tmp519, tmp19) tmp521 = libdevice.pow(tmp520, tmp21) tmp522 = tmp4 - tmp518 tmp523 = triton_helpers.maximum(tmp522, tmp19) tmp524 = libdevice.pow(tmp523, tmp21) tmp525 = tmp521 + tmp524 tmp526 = tmp7 - tmp518 tmp527 = triton_helpers.maximum(tmp526, tmp19) tmp528 = libdevice.pow(tmp527, tmp21) tmp529 = tmp525 + tmp528 tmp530 = tmp10 - tmp518 tmp531 = triton_helpers.maximum(tmp530, tmp19) tmp532 = libdevice.pow(tmp531, tmp21) tmp533 = tmp529 + tmp532 tmp534 = tmp533 - tmp12 tmp535 = tmp534 * tmp51 tmp536 = tmp535 >= tmp19 tmp537 = tl.where(tmp536, tmp518, tmp516) tmp538 = tmp517 * tmp1 tmp539 = tmp537 + tmp538 tmp540 = tmp2 - tmp539 tmp541 = triton_helpers.maximum(tmp540, tmp19) tmp542 = 
libdevice.pow(tmp541, tmp21) tmp543 = tmp4 - tmp539 tmp544 = triton_helpers.maximum(tmp543, tmp19) tmp545 = libdevice.pow(tmp544, tmp21) tmp546 = tmp542 + tmp545 tmp547 = tmp7 - tmp539 tmp548 = triton_helpers.maximum(tmp547, tmp19) tmp549 = libdevice.pow(tmp548, tmp21) tmp550 = tmp546 + tmp549 tmp551 = tmp10 - tmp539 tmp552 = triton_helpers.maximum(tmp551, tmp19) tmp553 = libdevice.pow(tmp552, tmp21) tmp554 = tmp550 + tmp553 tmp555 = tmp554 - tmp12 tmp556 = tmp555 * tmp51 tmp557 = tmp556 >= tmp19 tmp558 = tl.where(tmp557, tmp539, tmp537) tmp559 = tmp538 * tmp1 tmp560 = tmp558 + tmp559 tmp561 = tmp2 - tmp560 tmp562 = triton_helpers.maximum(tmp561, tmp19) tmp563 = libdevice.pow(tmp562, tmp21) tmp564 = tmp4 - tmp560 tmp565 = triton_helpers.maximum(tmp564, tmp19) tmp566 = libdevice.pow(tmp565, tmp21) tmp567 = tmp563 + tmp566 tmp568 = tmp7 - tmp560 tmp569 = triton_helpers.maximum(tmp568, tmp19) tmp570 = libdevice.pow(tmp569, tmp21) tmp571 = tmp567 + tmp570 tmp572 = tmp10 - tmp560 tmp573 = triton_helpers.maximum(tmp572, tmp19) tmp574 = libdevice.pow(tmp573, tmp21) tmp575 = tmp571 + tmp574 tl.store(out_ptr0 + (x0), tmp49, xmask) tl.store(out_ptr19 + (x0), tmp308, xmask) tl.store(in_out_ptr12 + (x0), tmp558, xmask) tl.store(out_ptr27 + (x0), tmp575, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/wp/cwps7jwo24axevjav4czdrzol2hcmol74gjjohufcaso6g5zh2my.py # Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, tau_m_25, tau_lo_26, dm_27, tau_m_26, sub_87, clamp_27, truediv_27, p_m_26], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow] # Source node to ATen node mapping: # X => mul # clamp_27 => clamp_min_27 # dm_16 => div_15 # dm_17 => div_16 # dm_18 => div_17 # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # p_m_26 => pow_30 # sub => full_default # sub_87 => sub_87 # tau_lo_26 => where_25 # tau_m_25 => add_25 # tau_m_26 => add_26 # truediv_27 => full_default_30 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {}) # %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {}) # %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = 
(%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %add_25 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_24, %div_25), kwargs = {}) # %where_25 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_25, %add_25, %where_24), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %add_26 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_25, %div_26), kwargs = {}) # %sub_87 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_26), kwargs = {}) # %clamp_min_27 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_87, 0), kwargs = {}) # %full_default_30 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_30 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_27, %full_default_30), kwargs = {}) triton_poi_fused_add_clamp_div_mul_pow_sub_where_1 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_where_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_where_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_where_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = 1.0 tmp5 = tmp3 - tmp4 tmp7 = tmp6 - tmp4 tmp8 = tmp5 * tmp7 tmp9 = 0.0 tmp10 = tmp8 >= tmp9 tmp13 = tmp12 * 
tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp11 + tmp23 tmp25 = tl.where(tmp10, tmp24, tmp11) tmp26 = tmp23 * tmp1 tmp27 = tmp25 + tmp26 tmp28 = tmp2 - tmp27 tmp29 = triton_helpers.maximum(tmp28, tmp9) tmp30 = 2.0 tmp31 = libdevice.pow(tmp29, tmp30) tl.store(out_ptr0 + (x2), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/h7/ch7plngbxligq5e4vnkulisynihcx6p3zjpiwu3vq4ul6nzoouq7.py # Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, tau_m_25, tau_lo_26, dm_27, tau_m_26, tau_lo_27, dm_28, tau_m_27, sub_90, clamp_28, truediv_28, p_m_27, sum_29], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum] # Source node to ATen node mapping: # X => mul # clamp_28 => clamp_min_28 # dm_16 => div_15 # dm_17 => div_16 # dm_18 => div_17 # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # p_m_27 => pow_31 # sub => full_default # sub_90 => sub_90 # sum_29 => sum_29 # tau_lo_26 => where_25 # tau_lo_27 => where_26 # tau_m_25 => add_25 # tau_m_26 => add_26 # tau_m_27 => add_27 # truediv_28 => full_default_31 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {}) # %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {}) # %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %add_25 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_24, %div_25), kwargs = {}) # %where_25 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_25, %add_25, %where_24), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %add_26 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_25, %div_26), kwargs = {}) # %where_26 : [num_users=2] = 
call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_26, %add_26, %where_25), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %add_27 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_26, %div_27), kwargs = {}) # %sub_90 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_27), kwargs = {}) # %clamp_min_28 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_90, 0), kwargs = {}) # %full_default_31 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_31 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_28, %full_default_31), kwargs = {}) # %sum_29 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_31, [-1]), kwargs = {}) triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (x0), xmask) tmp14 = tl.load(in_ptr2 + (x0), xmask) tmp18 = tl.load(in_out_ptr0 + (x0), xmask) tmp19 = tl.load(in_ptr3 + (x0), xmask) tmp37 = tl.load(in_ptr4 + (4*x0), xmask, eviction_policy='evict_last') tmp45 = tl.load(in_ptr4 + (1 + (4*x0)), xmask, 
eviction_policy='evict_last') tmp51 = tl.load(in_ptr4 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp57 = tl.load(in_ptr4 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 1.0 tmp8 = tmp6 - tmp7 tmp10 = tmp9 - tmp7 tmp11 = tmp8 * tmp10 tmp12 = 0.0 tmp13 = tmp11 >= tmp12 tmp15 = tmp14 - tmp7 tmp16 = tmp15 * tmp10 tmp17 = tmp16 >= tmp12 tmp20 = 0.5 tmp21 = tmp19 * tmp20 tmp22 = tmp21 * tmp20 tmp23 = tmp22 * tmp20 tmp24 = tmp23 * tmp20 tmp25 = tmp24 * tmp20 tmp26 = tmp25 * tmp20 tmp27 = tmp26 * tmp20 tmp28 = tmp27 * tmp20 tmp29 = tmp28 * tmp20 tmp30 = tmp29 * tmp20 tmp31 = tmp30 * tmp20 tmp32 = tmp18 + tmp31 tmp33 = tl.where(tmp17, tmp32, tmp18) tmp34 = tmp31 * tmp20 tmp35 = tmp33 + tmp34 tmp36 = tl.where(tmp13, tmp35, tmp33) tmp38 = tmp37 * tmp20 tmp39 = tmp34 * tmp20 tmp40 = tmp36 + tmp39 tmp41 = tmp38 - tmp40 tmp42 = triton_helpers.maximum(tmp41, tmp12) tmp43 = 2.0 tmp44 = libdevice.pow(tmp42, tmp43) tmp46 = tmp45 * tmp20 tmp47 = tmp46 - tmp40 tmp48 = triton_helpers.maximum(tmp47, tmp12) tmp49 = libdevice.pow(tmp48, tmp43) tmp50 = tmp44 + tmp49 tmp52 = tmp51 * tmp20 tmp53 = tmp52 - tmp40 tmp54 = triton_helpers.maximum(tmp53, tmp12) tmp55 = libdevice.pow(tmp54, tmp43) tmp56 = tmp50 + tmp55 tmp58 = tmp57 * tmp20 tmp59 = tmp58 - tmp40 tmp60 = triton_helpers.maximum(tmp59, tmp12) tmp61 = libdevice.pow(tmp60, tmp43) tmp62 = tmp56 + tmp61 tl.store(in_out_ptr0 + (x0), tmp36, xmask) tl.store(out_ptr0 + (x0), tmp62, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/xe/cxe754wrl5sjitrmajy2srrq6d2mjokc5tmty2qkwvhzgcsuyn5m.py # Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, tau_m_27, tau_lo_28, dm_29, tau_m_28, sub_93], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where] # Source node to ATen node mapping: # X => mul # dm_16 => div_15 # dm_17 => div_16 # dm_18 => div_17 # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # sub => full_default # sub_93 => sub_93 # tau_lo_28 => where_27 # tau_m_27 => add_27 # tau_m_28 => add_28 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {}) # %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {}) # %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = 
call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %add_27 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_26, %div_27), kwargs = {}) # %where_27 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_27, %add_27, %where_26), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %add_28 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_27, %div_28), kwargs = {}) # %sub_93 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_28), kwargs = {}) triton_poi_fused_add_div_mul_sub_where_3 = async_compile.triton('triton_poi_fused_add_div_mul_sub_where_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_where_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_sub_where_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = 1.0 tmp5 = tmp3 - tmp4 tmp7 = tmp6 - tmp4 tmp8 = tmp5 * tmp7 tmp9 = 0.0 tmp10 = tmp8 >= tmp9 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 
* tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp11 + tmp25 tmp27 = tl.where(tmp10, tmp26, tmp11) tmp28 = tmp25 * tmp1 tmp29 = tmp27 + tmp28 tmp30 = tmp2 - tmp29 tl.store(out_ptr0 + (x2), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/if/cif3u3drxkf366ugeo7gtgyy55gcup24bhcpwne6p454cqre4263.py # Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, tau_m_27, tau_lo_28, dm_29, tau_m_28, tau_lo_29, dm_30, tau_m_29, sub_96, clamp_30, truediv_30, p_m_29, sum_31], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum] # Source node to ATen node mapping: # X => mul # clamp_30 => clamp_min_30 # dm_16 => div_15 # dm_17 => div_16 # dm_18 => div_17 # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # p_m_29 => pow_33 # sub => full_default # sub_96 => sub_96 # sum_31 => sum_31 # tau_lo_28 => where_27 # tau_lo_29 => where_28 # tau_m_27 => add_27 # tau_m_28 => add_28 # tau_m_29 => add_29 # truediv_30 => full_default_33 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {}) # %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {}) # %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %add_27 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_26, %div_27), kwargs = {}) # %where_27 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_27, %add_27, %where_26), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %add_28 : [num_users=2] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%where_27, %div_28), kwargs = {}) # %where_28 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_28, %add_28, %where_27), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %add_29 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_28, %div_29), kwargs = {}) # %sub_96 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_29), kwargs = {}) # %clamp_min_30 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_96, 0), kwargs = {}) # %full_default_33 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_33 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_30, %full_default_33), kwargs = {}) # %sum_31 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_33, [-1]), kwargs = {}) triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (x0), xmask) tmp23 = tl.load(in_ptr2 + (x0), xmask) tmp27 = tl.load(in_out_ptr0 + (x0), xmask) tmp28 = tl.load(in_ptr3 + (x0), xmask) tmp48 = 
tl.load(in_ptr4 + (4*x0), xmask, eviction_policy='evict_last') tmp55 = tl.load(in_ptr4 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp61 = tl.load(in_ptr4 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp67 = tl.load(in_ptr4 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 2.0 tmp4 = libdevice.pow(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp5, tmp1) tmp7 = libdevice.pow(tmp6, tmp3) tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp9, tmp1) tmp11 = libdevice.pow(tmp10, tmp3) tmp12 = tmp8 + tmp11 tmp14 = triton_helpers.maximum(tmp13, tmp1) tmp15 = libdevice.pow(tmp14, tmp3) tmp16 = tmp12 + tmp15 tmp17 = 1.0 tmp18 = tmp16 - tmp17 tmp20 = tmp19 - tmp17 tmp21 = tmp18 * tmp20 tmp22 = tmp21 >= tmp1 tmp24 = tmp23 - tmp17 tmp25 = tmp24 * tmp20 tmp26 = tmp25 >= tmp1 tmp29 = 0.5 tmp30 = tmp28 * tmp29 tmp31 = tmp30 * tmp29 tmp32 = tmp31 * tmp29 tmp33 = tmp32 * tmp29 tmp34 = tmp33 * tmp29 tmp35 = tmp34 * tmp29 tmp36 = tmp35 * tmp29 tmp37 = tmp36 * tmp29 tmp38 = tmp37 * tmp29 tmp39 = tmp38 * tmp29 tmp40 = tmp39 * tmp29 tmp41 = tmp40 * tmp29 tmp42 = tmp41 * tmp29 tmp43 = tmp27 + tmp42 tmp44 = tl.where(tmp26, tmp43, tmp27) tmp45 = tmp42 * tmp29 tmp46 = tmp44 + tmp45 tmp47 = tl.where(tmp22, tmp46, tmp44) tmp49 = tmp48 * tmp29 tmp50 = tmp45 * tmp29 tmp51 = tmp47 + tmp50 tmp52 = tmp49 - tmp51 tmp53 = triton_helpers.maximum(tmp52, tmp1) tmp54 = libdevice.pow(tmp53, tmp3) tmp56 = tmp55 * tmp29 tmp57 = tmp56 - tmp51 tmp58 = triton_helpers.maximum(tmp57, tmp1) tmp59 = libdevice.pow(tmp58, tmp3) tmp60 = tmp54 + tmp59 tmp62 = tmp61 * tmp29 tmp63 = tmp62 - tmp51 tmp64 = triton_helpers.maximum(tmp63, tmp1) tmp65 = libdevice.pow(tmp64, tmp3) tmp66 = tmp60 + tmp65 tmp68 = tmp67 * tmp29 tmp69 = tmp68 - tmp51 tmp70 = triton_helpers.maximum(tmp69, tmp1) tmp71 = libdevice.pow(tmp70, tmp3) tmp72 = tmp66 + tmp71 tl.store(in_out_ptr0 + (x0), tmp47, xmask) tl.store(out_ptr0 + (x0), tmp72, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/dr/cdrunpih7elheat4jd7sxrqywzlxcqzpus4s2kn3q77rb3owzmr3.py # Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, tau_m_29, tau_lo_30, dm_31, tau_m_30, sub_99], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where] # Source node to ATen node mapping: # X => mul # dm_16 => div_15 # dm_17 => div_16 # dm_18 => div_17 # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # sub => full_default # sub_99 => sub_99 # tau_lo_30 => where_29 # tau_m_29 => add_29 # tau_m_30 => add_30 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {}) # %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {}) # %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {}) # %div_18 : [num_users=2] = 
call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %add_29 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_28, %div_29), kwargs = {}) # %where_29 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_29, %add_29, %where_28), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %add_30 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_29, %div_30), kwargs = {}) # %sub_99 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_30), kwargs = {}) triton_poi_fused_add_div_mul_sub_where_5 = async_compile.triton('triton_poi_fused_add_div_mul_sub_where_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_where_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_sub_where_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, 
out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = 1.0 tmp5 = tmp3 - tmp4 tmp7 = tmp6 - tmp4 tmp8 = tmp5 * tmp7 tmp9 = 0.0 tmp10 = tmp8 >= tmp9 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp25 * tmp1 tmp27 = tmp26 * tmp1 tmp28 = tmp11 + tmp27 tmp29 = tl.where(tmp10, tmp28, tmp11) tmp30 = tmp27 * tmp1 tmp31 = tmp29 + tmp30 tmp32 = tmp2 - tmp31 tl.store(out_ptr0 + (x2), tmp32, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6v/c6vga2tcnyteymwttvjbp5mmaopovt5iyiazfoj4b7d6pqavknf6.py # Topologically Sorted Source Nodes: [sub, X, f_lo, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, tau_m_29, tau_lo_30, dm_31, tau_m_30, tau_lo_31, dm_32, tau_m_31, sub_102, clamp_32, truediv_32, p_m_31, sum_33, tau_lo_32, dm_33, tau_m_32, sub_105, clamp_33, truediv_33, p_m_32, sum_34, f_m_32, mul_33, tau_lo_33, dm_34, tau_m_33, sub_108, clamp_34, truediv_34, p_m_33, sum_35, tau_lo_34], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum] # Source node to ATen node mapping: # X => mul # clamp_32 => clamp_min_32 # clamp_33 => clamp_min_33 # clamp_34 => clamp_min_34 # dm_16 => div_15 # dm_17 => div_16 # dm_18 => div_17 # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # dm_34 => div_33 # f_lo => sub_7 # f_m_32 => sub_107 # mul_33 => mul_67 # p_m_31 => pow_35 # p_m_32 => pow_36 # p_m_33 => pow_37 # sub => full_default # sub_102 => sub_102 # sub_105 => sub_105 # sub_108 => sub_108 # sum_33 => sum_33 # sum_34 => sum_34 # sum_35 => sum_35 # tau_lo_30 => where_29 # tau_lo_31 => where_30 # tau_lo_32 => where_31 # tau_lo_33 => where_32 # tau_lo_34 => where_33 # tau_m_29 => add_29 # tau_m_30 => add_30 # tau_m_31 => add_31 # tau_m_32 => add_32 # tau_m_33 => add_33 # truediv_32 => full_default_35 # truediv_33 => full_default_36 # truediv_34 => full_default_37 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %sub_7 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {}) # %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {}) # %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {}) # %div_17 : [num_users=2] = 
call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %add_29 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_28, %div_29), kwargs = {}) # %where_29 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_29, %add_29, %where_28), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %add_30 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_29, %div_30), kwargs = {}) # %where_30 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_30, %add_30, %where_29), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %add_31 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_30, %div_31), kwargs = {}) # %sub_102 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_31), kwargs = {}) # %clamp_min_32 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_102, 0), kwargs = {}) # %full_default_35 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_35 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_32, %full_default_35), kwargs = {}) # %sum_33 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_35, [-1]), kwargs = {}) # %where_31 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_31, %add_31, %where_30), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {}) # %add_32 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_31, %div_32), kwargs = {}) # %sub_105 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_32), kwargs = {}) # %clamp_min_33 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_105, 0), kwargs = {}) # 
%full_default_36 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_36 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_33, %full_default_36), kwargs = {}) # %sum_34 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_36, [-1]), kwargs = {}) # %sub_107 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_34, 1), kwargs = {}) # %mul_67 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_107, %sub_7), kwargs = {}) # %where_32 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_32, %add_32, %where_31), kwargs = {}) # %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {}) # %add_33 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_32, %div_33), kwargs = {}) # %sub_108 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_33), kwargs = {}) # %clamp_min_34 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_108, 0), kwargs = {}) # %full_default_37 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_37 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_34, %full_default_37), kwargs = {}) # %sum_35 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_37, [-1]), kwargs = {}) # %where_33 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_33, %add_33, %where_32), kwargs = {}) triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr2'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6(in_out_ptr0, in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (x0), xmask) tmp23 = tl.load(in_ptr2 + (x0), xmask) tmp27 = tl.load(in_out_ptr0 + (x0), xmask) tmp28 = tl.load(in_ptr3 + (x0), xmask) tmp50 = tl.load(in_ptr4 + (4*x0), xmask, eviction_policy='evict_last') tmp57 = tl.load(in_ptr4 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp63 = tl.load(in_ptr4 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp69 = tl.load(in_ptr4 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 2.0 tmp4 = libdevice.pow(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp5, tmp1) tmp7 = libdevice.pow(tmp6, tmp3) tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp9, tmp1) tmp11 = libdevice.pow(tmp10, tmp3) tmp12 = tmp8 + tmp11 tmp14 = triton_helpers.maximum(tmp13, tmp1) tmp15 = libdevice.pow(tmp14, tmp3) tmp16 = tmp12 + tmp15 tmp17 = 1.0 tmp18 = tmp16 - tmp17 tmp20 = tmp19 - tmp17 tmp21 = tmp18 * tmp20 tmp22 = tmp21 >= tmp1 tmp24 = tmp23 - tmp17 tmp25 = tmp24 * tmp20 tmp26 = tmp25 >= tmp1 tmp29 = 0.5 tmp30 = tmp28 * tmp29 tmp31 = tmp30 * tmp29 tmp32 = tmp31 * tmp29 tmp33 = tmp32 * tmp29 tmp34 = tmp33 * tmp29 tmp35 = tmp34 * tmp29 tmp36 = tmp35 * tmp29 tmp37 = tmp36 * tmp29 tmp38 = tmp37 * tmp29 tmp39 = tmp38 * tmp29 tmp40 = tmp39 * tmp29 tmp41 = tmp40 * tmp29 tmp42 = tmp41 * tmp29 tmp43 = tmp42 * tmp29 tmp44 = tmp43 * tmp29 tmp45 = tmp27 + tmp44 tmp46 = tl.where(tmp26, tmp45, tmp27) tmp47 = tmp44 * tmp29 tmp48 = tmp46 + tmp47 tmp49 = tl.where(tmp22, tmp48, tmp46) tmp51 = tmp50 * tmp29 tmp52 = tmp47 * tmp29 tmp53 = tmp49 + tmp52 tmp54 = tmp51 - tmp53 tmp55 = triton_helpers.maximum(tmp54, tmp1) tmp56 = libdevice.pow(tmp55, tmp3) tmp58 = tmp57 * tmp29 tmp59 = tmp58 - tmp53 tmp60 = triton_helpers.maximum(tmp59, tmp1) tmp61 = libdevice.pow(tmp60, tmp3) tmp62 = tmp56 + tmp61 tmp64 = tmp63 * tmp29 tmp65 = tmp64 - tmp53 tmp66 = triton_helpers.maximum(tmp65, tmp1) tmp67 = libdevice.pow(tmp66, tmp3) tmp68 = tmp62 + tmp67 tmp70 = tmp69 * tmp29 tmp71 = tmp70 - tmp53 tmp72 = triton_helpers.maximum(tmp71, tmp1) tmp73 = libdevice.pow(tmp72, tmp3) tmp74 = tmp68 + tmp73 tmp75 = tmp74 - tmp17 tmp76 = tmp75 * tmp20 tmp77 = tmp76 >= tmp1 tmp78 = tl.where(tmp77, tmp53, tmp49) tmp79 = tmp52 * tmp29 tmp80 = tmp78 + tmp79 tmp81 = tmp51 - tmp80 tmp82 = triton_helpers.maximum(tmp81, tmp1) tmp83 = libdevice.pow(tmp82, tmp3) tmp84 = tmp58 - tmp80 tmp85 = triton_helpers.maximum(tmp84, tmp1) tmp86 = libdevice.pow(tmp85, tmp3) tmp87 = tmp83 + tmp86 tmp88 = tmp64 - tmp80 tmp89 = triton_helpers.maximum(tmp88, tmp1) tmp90 = libdevice.pow(tmp89, tmp3) tmp91 = tmp87 + tmp90 tmp92 = tmp70 - tmp80 tmp93 = triton_helpers.maximum(tmp92, tmp1) tmp94 = libdevice.pow(tmp93, tmp3) tmp95 = tmp91 + tmp94 tmp96 = tmp95 - tmp17 tmp97 = tmp96 * tmp20 tmp98 = tmp97 >= tmp1 tmp99 = tl.where(tmp98, tmp80, tmp78) tmp100 = tmp79 * tmp29 tmp101 = tmp99 + tmp100 tmp102 = tmp51 - tmp101 tmp103 = triton_helpers.maximum(tmp102, tmp1) tmp104 = 
libdevice.pow(tmp103, tmp3) tmp105 = tmp58 - tmp101 tmp106 = triton_helpers.maximum(tmp105, tmp1) tmp107 = libdevice.pow(tmp106, tmp3) tmp108 = tmp104 + tmp107 tmp109 = tmp64 - tmp101 tmp110 = triton_helpers.maximum(tmp109, tmp1) tmp111 = libdevice.pow(tmp110, tmp3) tmp112 = tmp108 + tmp111 tmp113 = tmp70 - tmp101 tmp114 = triton_helpers.maximum(tmp113, tmp1) tmp115 = libdevice.pow(tmp114, tmp3) tmp116 = tmp112 + tmp115 tmp117 = tmp116 - tmp17 tmp118 = tmp117 * tmp20 tmp119 = tmp118 >= tmp1 tmp120 = tl.where(tmp119, tmp101, tmp99) tl.store(in_out_ptr2 + (x0), tmp120, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/qw/cqwjirql6abn7imghsni2jlkuybk4xkokodof2rpil4tssyex4uw.py # Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, tau_m_34, sub_111, clamp_35, truediv_35, p_m_34], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp, aten.pow] # Source node to ATen node mapping: # X => mul # clamp_35 => clamp_min_35 # dm_16 => div_15 # dm_17 => div_16 # dm_18 => div_17 # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # dm_34 => div_33 # dm_35 => div_34 # p_m_34 => pow_38 # sub => full_default # sub_111 => sub_111 # tau_m_34 => add_34 # truediv_35 => full_default_38 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {}) # %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {}) # %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), 
kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {}) # %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {}) # %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {}) # %add_34 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_33, %div_34), kwargs = {}) # %sub_111 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_34), kwargs = {}) # %clamp_min_35 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_111, 0), kwargs = {}) # %full_default_38 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_38 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_35, %full_default_38), kwargs = {}) triton_poi_fused_add_clamp_div_mul_pow_sub_7 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 
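    # The surrounding run of `* tmp1` statements (tmp1 == 0.5) is the
    # constant-folded form of dm_16 .. dm_35: Inductor unrolls each bisection
    # halving dm_k = dm_{k-1} / 2 into a multiply by 0.5 rather than emitting a
    # single scale by 2**-20. Once the chain ends, tmp25 = tau_lo + dm is the
    # bisection midpoint tau_m, and the clamp/square below evaluates
    # p_m = max(X - tau_m, 0) ** 2; the following kernel
    # (triton_poi_fused_add_div_where_8) sums p_m over the last dim and checks
    # the sign of (sum - 1) to pick the half-interval to keep. This pattern is
    # consistent with a bisection search for an entmax-1.5-style threshold,
    # where X = input * 0.5 and probabilities are clamp(X - tau, 0) ** 2.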
tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp3 + tmp24 tmp26 = tmp2 - tmp25 tmp27 = 0.0 tmp28 = triton_helpers.maximum(tmp26, tmp27) tmp29 = 2.0 tmp30 = libdevice.pow(tmp28, tmp29) tl.store(out_ptr0 + (x2), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3q/c3qnhz5l3skrhqr43bpfxlmkw6kumz7pxqtyenon3vddp3cudnfx.py # Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, tau_m_34, tau_lo_35], Original ATen: [aten.div, aten.add, aten.where] # Source node to ATen node mapping: # dm_16 => div_15 # dm_17 => div_16 # dm_18 => div_17 # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # dm_34 => div_33 # dm_35 => div_34 # tau_lo_35 => where_34 # tau_m_34 => add_34 # Graph fragment: # %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {}) # %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {}) # %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {}) # %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {}) # %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {}) # %add_34 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_33, %div_34), kwargs = {}) # %where_34 : [num_users=2] = 
call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_34, %add_34, %where_33), kwargs = {}) triton_poi_fused_add_div_where_8 = async_compile.triton('triton_poi_fused_add_div_where_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_where_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_where_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (x0), xmask) tmp14 = tl.load(in_out_ptr0 + (x0), xmask) tmp15 = tl.load(in_ptr2 + (x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 1.0 tmp8 = tmp6 - tmp7 tmp10 = tmp9 - tmp7 tmp11 = tmp8 * tmp10 tmp12 = 0.0 tmp13 = tmp11 >= tmp12 tmp16 = 0.5 tmp17 = tmp15 * tmp16 tmp18 = tmp17 * tmp16 tmp19 = tmp18 * tmp16 tmp20 = tmp19 * tmp16 tmp21 = tmp20 * tmp16 tmp22 = tmp21 * tmp16 tmp23 = tmp22 * tmp16 tmp24 = tmp23 * tmp16 tmp25 = tmp24 * tmp16 tmp26 = tmp25 * tmp16 tmp27 = tmp26 * tmp16 tmp28 = tmp27 * tmp16 tmp29 = tmp28 * tmp16 tmp30 = tmp29 * tmp16 tmp31 = tmp30 * tmp16 tmp32 = tmp31 * tmp16 tmp33 = tmp32 * tmp16 tmp34 = tmp33 * tmp16 tmp35 = tmp34 * tmp16 tmp36 = tmp35 * tmp16 tmp37 = tmp14 + tmp36 tmp38 = tl.where(tmp13, tmp37, tmp14) tl.store(in_out_ptr0 + (x0), tmp38, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/65/c65c6rqbfpxnnf52lttfmjffp6c57rq5vh5zishetax476bzxbyn.py # Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, tau_m_35, sub_114, clamp_36, truediv_36, p_m_35], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp, aten.pow] # Source node to ATen node mapping: # X => mul # clamp_36 => clamp_min_36 # dm_16 => div_15 
# dm_17 => div_16 # dm_18 => div_17 # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # dm_34 => div_33 # dm_35 => div_34 # dm_36 => div_35 # p_m_35 => pow_39 # sub => full_default # sub_114 => sub_114 # tau_m_35 => add_35 # truediv_36 => full_default_39 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {}) # %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {}) # %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {}) # %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {}) # %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {}) # %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {}) # %add_35 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_34, %div_35), kwargs = {}) # %sub_114 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_35), kwargs = {}) # %clamp_min_36 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_114, 0), kwargs = {}) # %full_default_39 : [num_users=1] = 
call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_39 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_36, %full_default_39), kwargs = {}) triton_poi_fused_add_clamp_div_mul_pow_sub_9 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp3 + tmp25 tmp27 = tmp2 - tmp26 tmp28 = 0.0 tmp29 = triton_helpers.maximum(tmp27, tmp28) tmp30 = 2.0 tmp31 = libdevice.pow(tmp29, tmp30) tl.store(out_ptr0 + (x2), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/hc/chcgwhxqzahejjnkomdddn3fpfvhgpal4i42dvbnrcw5fjtsuuip.py # Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, tau_m_35, tau_lo_36], Original ATen: [aten.div, aten.add, aten.where] # Source node to ATen node mapping: # dm_16 => div_15 # dm_17 => div_16 # dm_18 => div_17 # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # 
dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # dm_34 => div_33 # dm_35 => div_34 # dm_36 => div_35 # tau_lo_36 => where_35 # tau_m_35 => add_35 # Graph fragment: # %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {}) # %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {}) # %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {}) # %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {}) # %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {}) # %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {}) # %add_35 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_34, %div_35), kwargs = {}) # %where_35 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_35, %add_35, %where_34), kwargs = {}) triton_poi_fused_add_div_where_10 = async_compile.triton('triton_poi_fused_add_div_where_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_where_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_where_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (x0), xmask) tmp14 = tl.load(in_out_ptr0 + (x0), xmask) tmp15 = tl.load(in_ptr2 + (x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 1.0 tmp8 = tmp6 - tmp7 tmp10 = tmp9 - tmp7 tmp11 = tmp8 * tmp10 tmp12 = 0.0 tmp13 = tmp11 >= tmp12 tmp16 = 0.5 tmp17 = tmp15 * tmp16 tmp18 = tmp17 * tmp16 tmp19 = tmp18 * tmp16 tmp20 = tmp19 * tmp16 tmp21 = tmp20 * tmp16 tmp22 = tmp21 * tmp16 tmp23 = tmp22 * tmp16 tmp24 = tmp23 * tmp16 tmp25 = tmp24 * tmp16 tmp26 = tmp25 * tmp16 tmp27 = tmp26 * tmp16 tmp28 = tmp27 * tmp16 tmp29 = tmp28 * tmp16 tmp30 = tmp29 * tmp16 tmp31 = tmp30 * tmp16 tmp32 = tmp31 * tmp16 tmp33 = tmp32 * tmp16 tmp34 = tmp33 * tmp16 tmp35 = tmp34 * tmp16 tmp36 = tmp35 * tmp16 tmp37 = tmp36 * tmp16 tmp38 = tmp14 + tmp37 tmp39 = tl.where(tmp13, tmp38, tmp14) tl.store(in_out_ptr0 + (x0), tmp39, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/hk/chkgkmcorveaoty2aubj4aiqs7c4icysi6i7uztapg7qoahqwomz.py # Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, tau_m_36, sub_117, clamp_37], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp] # Source node to ATen node mapping: # X => mul # clamp_37 => clamp_min_37 # dm_16 => div_15 # dm_17 => div_16 # dm_18 => div_17 # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # dm_34 => div_33 # dm_35 => div_34 # dm_36 => div_35 # dm_37 => div_36 # sub => full_default # sub_117 => sub_117 # tau_m_36 => add_36 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %div_15 : [num_users=2] = 
call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {}) # %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {}) # %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {}) # %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {}) # %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {}) # %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {}) # %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {}) # %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {}) # %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {}) # %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {}) # %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {}) # %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {}) # %add_36 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_35, %div_36), kwargs = {}) # %sub_117 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_36), kwargs = {}) # %clamp_min_37 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_117, 0), kwargs = {}) triton_poi_fused_add_clamp_div_mul_sub_11 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_sub_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': 
{}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_sub_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_mul_sub_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp25 * tmp1 tmp27 = tmp3 + tmp26 tmp28 = tmp2 - tmp27 tmp29 = 0.0 tmp30 = triton_helpers.maximum(tmp28, tmp29) tl.store(out_ptr0 + (x2), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6j/c6jx7jobgsfa3dnpqdh4py4h2yjwqcnyfrnfxf4y2pamiu6cwdpl.py # Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, tau_m_36, tau_lo_37], Original ATen: [aten.div, aten.add, aten.where] # Source node to ATen node mapping: # dm_16 => div_15 # dm_17 => div_16 # dm_18 => div_17 # dm_19 => div_18 # dm_20 => div_19 # dm_21 => div_20 # dm_22 => div_21 # dm_23 => div_22 # dm_24 => div_23 # dm_25 => div_24 # dm_26 => div_25 # dm_27 => div_26 # dm_28 => div_27 # dm_29 => div_28 # dm_30 => div_29 # dm_31 => div_30 # dm_32 => div_31 # dm_33 => div_32 # dm_34 => div_33 # dm_35 => div_34 # dm_36 => div_35 # dm_37 => div_36 # tau_lo_37 => where_36 # tau_m_36 => add_36 # Graph fragment: # %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {}) # %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {}) # %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {}) # %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {}) # %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {}) # %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {}) # %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), 

# kernel path: runs/run_shard_0/inductor_cache/6j/c6jx7jobgsfa3dnpqdh4py4h2yjwqcnyfrnfxf4y2pamiu6cwdpl.py
# Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, tau_m_36, tau_lo_37], Original ATen: [aten.div, aten.add, aten.where]
# Source node to ATen node mapping:
#   dm_16 => div_15
#   dm_17 => div_16
#   dm_18 => div_17
#   dm_19 => div_18
#   dm_20 => div_19
#   dm_21 => div_20
#   dm_22 => div_21
#   dm_23 => div_22
#   dm_24 => div_23
#   dm_25 => div_24
#   dm_26 => div_25
#   dm_27 => div_26
#   dm_28 => div_27
#   dm_29 => div_28
#   dm_30 => div_29
#   dm_31 => div_30
#   dm_32 => div_31
#   dm_33 => div_32
#   dm_34 => div_33
#   dm_35 => div_34
#   dm_36 => div_35
#   dm_37 => div_36
#   tau_lo_37 => where_36
#   tau_m_36 => add_36
# Graph fragment:
#   %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
#   %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
#   %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
#   %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
#   %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
#   %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
#   %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
#   %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
#   %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
#   %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
#   %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
#   %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
#   %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
#   %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
#   %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
#   %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
#   %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
#   %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
#   %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
#   %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
#   %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
#   %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
#   %add_36 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_35, %div_36), kwargs = {})
#   %where_36 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_36, %add_36, %where_35), kwargs = {})
triton_poi_fused_add_div_where_12 = async_compile.triton('triton_poi_fused_add_div_where_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_where_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_where_12(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + (x0), xmask)
    tmp19 = tl.load(in_out_ptr0 + (x0), xmask)
    tmp20 = tl.load(in_ptr2 + (x0), xmask)
    tmp1 = 2.0
    tmp2 = libdevice.pow(tmp0, tmp1)
    tmp4 = libdevice.pow(tmp3, tmp1)
    tmp5 = tmp2 + tmp4
    tmp7 = libdevice.pow(tmp6, tmp1)
    tmp8 = tmp5 + tmp7
    tmp10 = libdevice.pow(tmp9, tmp1)
    tmp11 = tmp8 + tmp10
    tmp12 = 1.0
    tmp13 = tmp11 - tmp12
    tmp15 = tmp14 - tmp12
    tmp16 = tmp13 * tmp15
    tmp17 = 0.0
    tmp18 = tmp16 >= tmp17
    tmp21 = 0.5
    tmp22 = tmp20 * tmp21
    tmp23 = tmp22 * tmp21
    tmp24 = tmp23 * tmp21
    tmp25 = tmp24 * tmp21
    tmp26 = tmp25 * tmp21
    tmp27 = tmp26 * tmp21
    tmp28 = tmp27 * tmp21
    tmp29 = tmp28 * tmp21
    tmp30 = tmp29 * tmp21
    tmp31 = tmp30 * tmp21
    tmp32 = tmp31 * tmp21
    tmp33 = tmp32 * tmp21
    tmp34 = tmp33 * tmp21
    tmp35 = tmp34 * tmp21
    tmp36 = tmp35 * tmp21
    tmp37 = tmp36 * tmp21
    tmp38 = tmp37 * tmp21
    tmp39 = tmp38 * tmp21
    tmp40 = tmp39 * tmp21
    tmp41 = tmp40 * tmp21
    tmp42 = tmp41 * tmp21
    tmp43 = tmp42 * tmp21
    tmp44 = tmp19 + tmp43
    tmp45 = tl.where(tmp18, tmp44, tmp19)
    tl.store(in_out_ptr0 + (x0), tmp45, xmask)
''', device_str='cuda')
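
# [Editor's note] This companion kernel performs the bisection *update* in
# place (in_out_ptr0 holds tau_lo): it recomputes f(tau_m) = sum(p_m**2) - 1
# from the probe written above and advances tau_lo only where f(tau_m) agrees
# in sign with f(tau_lo). Editor-added eager sketch with hypothetical names;
# sum_lo corresponds to the %sum_1 tensor loaded from in_ptr1:
def _sketch_update_12(p_m, sum_lo, tau_lo, dm):
    f_m = (p_m ** 2).sum(dim=-1) - 1.0  # tmp2..tmp13: row sum of squares minus 1
    f_lo = sum_lo - 1.0                 # tmp15
    keep = (f_m * f_lo) >= 0.0          # tmp16/tmp18: sign-agreement test
    return torch.where(keep, tau_lo + dm, tau_lo)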

# kernel path: runs/run_shard_0/inductor_cache/dg/cdgzbjixzgjbzoonwf327zqettjzik6jiaut4vfckxwzzrjpw4ci.py
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, tau_m_37, sub_120, clamp_38], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp]
# Source node to ATen node mapping:
#   X => mul
#   clamp_38 => clamp_min_38
#   dm_16 => div_15
#   dm_17 => div_16
#   dm_18 => div_17
#   dm_19 => div_18
#   dm_20 => div_19
#   dm_21 => div_20
#   dm_22 => div_21
#   dm_23 => div_22
#   dm_24 => div_23
#   dm_25 => div_24
#   dm_26 => div_25
#   dm_27 => div_26
#   dm_28 => div_27
#   dm_29 => div_28
#   dm_30 => div_29
#   dm_31 => div_30
#   dm_32 => div_31
#   dm_33 => div_32
#   dm_34 => div_33
#   dm_35 => div_34
#   dm_36 => div_35
#   dm_37 => div_36
#   dm_38 => div_37
#   sub => full_default
#   sub_120 => sub_120
#   tau_m_37 => add_37
# Graph fragment:
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
#   %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
#   %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
#   %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
#   %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
#   %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
#   %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
#   %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
#   %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
#   %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
#   %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
#   %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
#   %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
#   %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
#   %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
#   %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
#   %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
#   %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
#   %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
#   %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
#   %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
#   %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
#   %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
#   %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
#   %add_37 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_36, %div_37), kwargs = {})
#   %sub_120 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_37), kwargs = {})
#   %clamp_min_38 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_120, 0), kwargs = {})
triton_poi_fused_add_clamp_div_mul_sub_13 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_sub_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_sub_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_sub_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp5 = tmp4 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = tmp6 * tmp1
    tmp8 = tmp7 * tmp1
    tmp9 = tmp8 * tmp1
    tmp10 = tmp9 * tmp1
    tmp11 = tmp10 * tmp1
    tmp12 = tmp11 * tmp1
    tmp13 = tmp12 * tmp1
    tmp14 = tmp13 * tmp1
    tmp15 = tmp14 * tmp1
    tmp16 = tmp15 * tmp1
    tmp17 = tmp16 * tmp1
    tmp18 = tmp17 * tmp1
    tmp19 = tmp18 * tmp1
    tmp20 = tmp19 * tmp1
    tmp21 = tmp20 * tmp1
    tmp22 = tmp21 * tmp1
    tmp23 = tmp22 * tmp1
    tmp24 = tmp23 * tmp1
    tmp25 = tmp24 * tmp1
    tmp26 = tmp25 * tmp1
    tmp27 = tmp26 * tmp1
    tmp28 = tmp3 + tmp27
    tmp29 = tmp2 - tmp28
    tmp30 = 0.0
    tmp31 = triton_helpers.maximum(tmp29, tmp30)
    tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
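
# [Editor's note] Kernel 13 is the same probe as kernel 11 with one more
# halving (23 instead of 22), i.e. it evaluates tau_lo + dm_15 * 0.5**23.
# Inductor unrolls the halvings as a chain of `* 0.5` ops; arithmetically the
# whole chain constant-folds (editor-added one-liner, hypothetical name):
def _sketch_dm(dm_15, k):
    return dm_15 * 0.5 ** k  # k = 23 here; k = 22 in kernel 11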

# kernel path: runs/run_shard_0/inductor_cache/f2/cf2smd7msnlfnwkswpdfa2pro4t25uk7k6sqqtuhugmitppzjw2h.py
# Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, tau_m_37, tau_lo_38], Original ATen: [aten.div, aten.add, aten.where]
# Source node to ATen node mapping:
#   dm_16 => div_15
#   dm_17 => div_16
#   dm_18 => div_17
#   dm_19 => div_18
#   dm_20 => div_19
#   dm_21 => div_20
#   dm_22 => div_21
#   dm_23 => div_22
#   dm_24 => div_23
#   dm_25 => div_24
#   dm_26 => div_25
#   dm_27 => div_26
#   dm_28 => div_27
#   dm_29 => div_28
#   dm_30 => div_29
#   dm_31 => div_30
#   dm_32 => div_31
#   dm_33 => div_32
#   dm_34 => div_33
#   dm_35 => div_34
#   dm_36 => div_35
#   dm_37 => div_36
#   dm_38 => div_37
#   tau_lo_38 => where_37
#   tau_m_37 => add_37
# Graph fragment:
#   %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
#   %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
#   %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
#   %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
#   %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
#   %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
#   %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
#   %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
#   %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
#   %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
#   %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
#   %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
#   %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
#   %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
#   %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
#   %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
#   %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
#   %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
#   %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
#   %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
#   %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
#   %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
#   %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
#   %add_37 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_36, %div_37), kwargs = {})
#   %where_37 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_37, %add_37, %where_36), kwargs = {})
triton_poi_fused_add_div_where_14 = async_compile.triton('triton_poi_fused_add_div_where_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_where_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_where_14(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + (x0), xmask)
    tmp19 = tl.load(in_out_ptr0 + (x0), xmask)
    tmp20 = tl.load(in_ptr2 + (x0), xmask)
    tmp1 = 2.0
    tmp2 = libdevice.pow(tmp0, tmp1)
    tmp4 = libdevice.pow(tmp3, tmp1)
    tmp5 = tmp2 + tmp4
    tmp7 = libdevice.pow(tmp6, tmp1)
    tmp8 = tmp5 + tmp7
    tmp10 = libdevice.pow(tmp9, tmp1)
    tmp11 = tmp8 + tmp10
    tmp12 = 1.0
    tmp13 = tmp11 - tmp12
    tmp15 = tmp14 - tmp12
    tmp16 = tmp13 * tmp15
    tmp17 = 0.0
    tmp18 = tmp16 >= tmp17
    tmp21 = 0.5
    tmp22 = tmp20 * tmp21
    tmp23 = tmp22 * tmp21
    tmp24 = tmp23 * tmp21
    tmp25 = tmp24 * tmp21
    tmp26 = tmp25 * tmp21
    tmp27 = tmp26 * tmp21
    tmp28 = tmp27 * tmp21
    tmp29 = tmp28 * tmp21
    tmp30 = tmp29 * tmp21
    tmp31 = tmp30 * tmp21
    tmp32 = tmp31 * tmp21
    tmp33 = tmp32 * tmp21
    tmp34 = tmp33 * tmp21
    tmp35 = tmp34 * tmp21
    tmp36 = tmp35 * tmp21
    tmp37 = tmp36 * tmp21
    tmp38 = tmp37 * tmp21
    tmp39 = tmp38 * tmp21
    tmp40 = tmp39 * tmp21
    tmp41 = tmp40 * tmp21
    tmp42 = tmp41 * tmp21
    tmp43 = tmp42 * tmp21
    tmp44 = tmp43 * tmp21
    tmp45 = tmp19 + tmp44
    tmp46 = tl.where(tmp18, tmp45, tmp19)
    tl.store(in_out_ptr0 + (x0), tmp46, xmask)
''', device_str='cuda')
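
# [Editor's note] Each bisection iteration lowers to this probe/update pair:
# a `*_sub_*` kernel writes the clamped probe, then a `*_where_*` kernel
# mutates tau_lo in place (hence 'mutated_arg_names': ['in_out_ptr0']). An
# editor-added view of one whole iteration, under the same assumptions as the
# sketches above (X pre-scaled by 0.5, f_lo broadcast over the last dim):
def _sketch_iteration(X, tau_lo, f_lo, dm):
    dm = dm / 2
    tau_m = tau_lo + dm
    f_m = torch.clamp(X - tau_m, min=0).pow(2).sum(-1, keepdim=True) - 1.0
    tau_lo = torch.where(f_m * f_lo >= 0, tau_m, tau_lo)
    return tau_lo, dm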

# kernel path: runs/run_shard_0/inductor_cache/m6/cm6g7ftzm55nml3oy3bwbqmue7bg6inetry6yvug7w4kb6ev5rty.py
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, tau_m_38, sub_123], Original ATen: [aten.sub, aten.mul, aten.div, aten.add]
# Source node to ATen node mapping:
#   X => mul
#   dm_16 => div_15
#   dm_17 => div_16
#   dm_18 => div_17
#   dm_19 => div_18
#   dm_20 => div_19
#   dm_21 => div_20
#   dm_22 => div_21
#   dm_23 => div_22
#   dm_24 => div_23
#   dm_25 => div_24
#   dm_26 => div_25
#   dm_27 => div_26
#   dm_28 => div_27
#   dm_29 => div_28
#   dm_30 => div_29
#   dm_31 => div_30
#   dm_32 => div_31
#   dm_33 => div_32
#   dm_34 => div_33
#   dm_35 => div_34
#   dm_36 => div_35
#   dm_37 => div_36
#   dm_38 => div_37
#   dm_39 => div_38
#   sub => full_default
#   sub_123 => sub_123
#   tau_m_38 => add_38
# Graph fragment:
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
#   %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
#   %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
#   %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
#   %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
#   %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
#   %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
#   %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
#   %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
#   %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
#   %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
#   %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
#   %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
#   %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
#   %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
#   %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
#   %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
#   %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
#   %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
#   %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
#   %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
#   %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
#   %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
#   %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
#   %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
#   %add_38 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_37, %div_38), kwargs = {})
#   %sub_123 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_38), kwargs = {})
triton_poi_fused_add_div_mul_sub_15 = async_compile.triton('triton_poi_fused_add_div_mul_sub_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_sub_15(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp5 = tmp4 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = tmp6 * tmp1
    tmp8 = tmp7 * tmp1
    tmp9 = tmp8 * tmp1
    tmp10 = tmp9 * tmp1
    tmp11 = tmp10 * tmp1
    tmp12 = tmp11 * tmp1
    tmp13 = tmp12 * tmp1
    tmp14 = tmp13 * tmp1
    tmp15 = tmp14 * tmp1
    tmp16 = tmp15 * tmp1
    tmp17 = tmp16 * tmp1
    tmp18 = tmp17 * tmp1
    tmp19 = tmp18 * tmp1
    tmp20 = tmp19 * tmp1
    tmp21 = tmp20 * tmp1
    tmp22 = tmp21 * tmp1
    tmp23 = tmp22 * tmp1
    tmp24 = tmp23 * tmp1
    tmp25 = tmp24 * tmp1
    tmp26 = tmp25 * tmp1
    tmp27 = tmp26 * tmp1
    tmp28 = tmp27 * tmp1
    tmp29 = tmp3 + tmp28
    tmp30 = tmp2 - tmp29
    tl.store(out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
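
# [Editor's note] Unlike kernels 11 and 13, this probe stores the *unclamped*
# difference X*0.5 - tau_m; the consuming update kernel below applies
# triton_helpers.maximum(x, 0) before squaring instead, which is equivalent
# because clamp_min(x, 0)**2 == relu(x)**2. Editor-added equivalence check:
def _sketch_clamp_equivalence(x):
    return torch.allclose(torch.clamp(x, min=0) ** 2, torch.relu(x) ** 2)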

# kernel path: runs/run_shard_0/inductor_cache/it/citgidtovhh7ieg5w6kgdaztr74d5qimxq5xchpfcretvgwraeaz.py
# Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, tau_m_38, tau_lo_39], Original ATen: [aten.div, aten.add, aten.where]
# Source node to ATen node mapping:
#   dm_16 => div_15
#   dm_17 => div_16
#   dm_18 => div_17
#   dm_19 => div_18
#   dm_20 => div_19
#   dm_21 => div_20
#   dm_22 => div_21
#   dm_23 => div_22
#   dm_24 => div_23
#   dm_25 => div_24
#   dm_26 => div_25
#   dm_27 => div_26
#   dm_28 => div_27
#   dm_29 => div_28
#   dm_30 => div_29
#   dm_31 => div_30
#   dm_32 => div_31
#   dm_33 => div_32
#   dm_34 => div_33
#   dm_35 => div_34
#   dm_36 => div_35
#   dm_37 => div_36
#   dm_38 => div_37
#   dm_39 => div_38
#   tau_lo_39 => where_38
#   tau_m_38 => add_38
# Graph fragment:
#   %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
#   %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
#   %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
#   %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
#   %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
#   %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
#   %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
#   %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
#   %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
#   %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
#   %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
#   %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
#   %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
#   %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
#   %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
#   %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
#   %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
#   %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
#   %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
#   %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
#   %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
#   %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
#   %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
#   %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
#   %add_38 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_37, %div_38), kwargs = {})
#   %where_38 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_38, %add_38, %where_37), kwargs = {})
triton_poi_fused_add_div_where_16 = async_compile.triton('triton_poi_fused_add_div_where_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_where_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_where_16(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr1 + (x0), xmask)
    tmp23 = tl.load(in_out_ptr0 + (x0), xmask)
    tmp24 = tl.load(in_ptr2 + (x0), xmask)
    tmp1 = 0.0
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp3 = 2.0
    tmp4 = libdevice.pow(tmp2, tmp3)
    tmp6 = triton_helpers.maximum(tmp5, tmp1)
    tmp7 = libdevice.pow(tmp6, tmp3)
    tmp8 = tmp4 + tmp7
    tmp10 = triton_helpers.maximum(tmp9, tmp1)
    tmp11 = libdevice.pow(tmp10, tmp3)
    tmp12 = tmp8 + tmp11
    tmp14 = triton_helpers.maximum(tmp13, tmp1)
    tmp15 = libdevice.pow(tmp14, tmp3)
    tmp16 = tmp12 + tmp15
    tmp17 = 1.0
    tmp18 = tmp16 - tmp17
    tmp20 = tmp19 - tmp17
    tmp21 = tmp18 * tmp20
    tmp22 = tmp21 >= tmp1
    tmp25 = 0.5
    tmp26 = tmp24 * tmp25
    tmp27 = tmp26 * tmp25
    tmp28 = tmp27 * tmp25
    tmp29 = tmp28 * tmp25
    tmp30 = tmp29 * tmp25
    tmp31 = tmp30 * tmp25
    tmp32 = tmp31 * tmp25
    tmp33 = tmp32 * tmp25
    tmp34 = tmp33 * tmp25
    tmp35 = tmp34 * tmp25
    tmp36 = tmp35 * tmp25
    tmp37 = tmp36 * tmp25
    tmp38 = tmp37 * tmp25
    tmp39 = tmp38 * tmp25
    tmp40 = tmp39 * tmp25
    tmp41 = tmp40 * tmp25
    tmp42 = tmp41 * tmp25
    tmp43 = tmp42 * tmp25
    tmp44 = tmp43 * tmp25
    tmp45 = tmp44 * tmp25
    tmp46 = tmp45 * tmp25
    tmp47 = tmp46 * tmp25
    tmp48 = tmp47 * tmp25
    tmp49 = tmp48 * tmp25
    tmp50 = tmp23 + tmp49
    tmp51 = tl.where(tmp22, tmp50, tmp23)
    tl.store(in_out_ptr0 + (x0), tmp51, xmask)
''', device_str='cuda')
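
# [Editor's note] At this update the step has been halved 24 times, so the
# increment is dm_15 * 2**-24 ~= dm_15 * 6e-8, near float32 resolution
# (eps = 2**-23 ~= 1.19e-7) relative to tau; later iterations mostly refine
# the last ulp. Editor-added arithmetic for that claim:
def _sketch_step_size(dm_15):
    return dm_15 * 2.0 ** -24  # equals the 24 unrolled `* 0.5` steps above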

# kernel path: runs/run_shard_0/inductor_cache/ow/cowrvlpr3aq5lvglsdero5kvdkcaqmnxh7ylqy6wg5asbxkwbugf.py
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, tau_m_39, sub_126], Original ATen: [aten.sub, aten.mul, aten.div, aten.add]
# Source node to ATen node mapping:
#   X => mul
#   dm_16 => div_15
#   dm_17 => div_16
#   dm_18 => div_17
#   dm_19 => div_18
#   dm_20 => div_19
#   dm_21 => div_20
#   dm_22 => div_21
#   dm_23 => div_22
#   dm_24 => div_23
#   dm_25 => div_24
#   dm_26 => div_25
#   dm_27 => div_26
#   dm_28 => div_27
#   dm_29 => div_28
#   dm_30 => div_29
#   dm_31 => div_30
#   dm_32 => div_31
#   dm_33 => div_32
#   dm_34 => div_33
#   dm_35 => div_34
#   dm_36 => div_35
#   dm_37 => div_36
#   dm_38 => div_37
#   dm_39 => div_38
#   dm_40 => div_39
#   sub => full_default
#   sub_126 => sub_126
#   tau_m_39 => add_39
# Graph fragment:
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
#   %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
#   %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
#   %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
#   %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
#   %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
#   %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
#   %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
#   %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
#   %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
#   %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
#   %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
#   %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
#   %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
#   %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
#   %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
#   %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
#   %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
#   %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
#   %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
#   %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
#   %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
#   %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
#   %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
#   %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
#   %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
#   %add_39 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_38, %div_39), kwargs = {})
#   %sub_126 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_39), kwargs = {})
triton_poi_fused_add_div_mul_sub_17 = async_compile.triton('triton_poi_fused_add_div_mul_sub_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_sub_17(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp5 = tmp4 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = tmp6 * tmp1
    tmp8 = tmp7 * tmp1
    tmp9 = tmp8 * tmp1
    tmp10 = tmp9 * tmp1
    tmp11 = tmp10 * tmp1
    tmp12 = tmp11 * tmp1
    tmp13 = tmp12 * tmp1
    tmp14 = tmp13 * tmp1
    tmp15 = tmp14 * tmp1
    tmp16 = tmp15 * tmp1
    tmp17 = tmp16 * tmp1
    tmp18 = tmp17 * tmp1
    tmp19 = tmp18 * tmp1
    tmp20 = tmp19 * tmp1
    tmp21 = tmp20 * tmp1
    tmp22 = tmp21 * tmp1
    tmp23 = tmp22 * tmp1
    tmp24 = tmp23 * tmp1
    tmp25 = tmp24 * tmp1
    tmp26 = tmp25 * tmp1
    tmp27 = tmp26 * tmp1
    tmp28 = tmp27 * tmp1
    tmp29 = tmp28 * tmp1
    tmp30 = tmp3 + tmp29
    tmp31 = tmp2 - tmp30
    tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
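
# [Editor's note] In these probes x2 indexes all 256 elements while
# x1 = xindex // 4 re-reads one tau/dm scalar per row of the size-4 last
# dimension; eviction_policy='evict_last' marks those reused scalars for
# cache retention. Editor-added eager view of the same broadcast
# (hypothetical shapes):
def _sketch_broadcast(X, tau_m):
    # X: (..., 4); tau_m: (...,), one scalar per row -> broadcast subtract
    return X * 0.5 - tau_m.unsqueeze(-1)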

# kernel path: runs/run_shard_0/inductor_cache/z4/cz4ddiop67cjhvnymyeyxeouq7iyjrkpudsvnur2jqxca35zlu35.py
# Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, tau_m_39, tau_lo_40], Original ATen: [aten.div, aten.add, aten.where]
# Source node to ATen node mapping:
#   dm_16 => div_15
#   dm_17 => div_16
#   dm_18 => div_17
#   dm_19 => div_18
#   dm_20 => div_19
#   dm_21 => div_20
#   dm_22 => div_21
#   dm_23 => div_22
#   dm_24 => div_23
#   dm_25 => div_24
#   dm_26 => div_25
#   dm_27 => div_26
#   dm_28 => div_27
#   dm_29 => div_28
#   dm_30 => div_29
#   dm_31 => div_30
#   dm_32 => div_31
#   dm_33 => div_32
#   dm_34 => div_33
#   dm_35 => div_34
#   dm_36 => div_35
#   dm_37 => div_36
#   dm_38 => div_37
#   dm_39 => div_38
#   dm_40 => div_39
#   tau_lo_40 => where_39
#   tau_m_39 => add_39
# Graph fragment:
#   %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
#   %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
#   %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
#   %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
#   %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
#   %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
#   %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
#   %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
#   %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
#   %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
#   %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
#   %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
#   %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
#   %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
#   %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
#   %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
#   %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
#   %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
#   %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
#   %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
#   %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
#   %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
#   %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
#   %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
#   %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
#   %add_39 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_38, %div_39), kwargs = {})
#   %where_39 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_39, %add_39, %where_38), kwargs = {})
triton_poi_fused_add_div_where_18 = async_compile.triton('triton_poi_fused_add_div_where_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_where_18', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_where_18(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr1 + (x0), xmask)
    tmp23 = tl.load(in_out_ptr0 + (x0), xmask)
    tmp24 = tl.load(in_ptr2 + (x0), xmask)
    tmp1 = 0.0
    tmp2 = triton_helpers.maximum(tmp0, tmp1)
    tmp3 = 2.0
    tmp4 = libdevice.pow(tmp2, tmp3)
    tmp6 = triton_helpers.maximum(tmp5, tmp1)
    tmp7 = libdevice.pow(tmp6, tmp3)
    tmp8 = tmp4 + tmp7
    tmp10 = triton_helpers.maximum(tmp9, tmp1)
    tmp11 = libdevice.pow(tmp10, tmp3)
    tmp12 = tmp8 + tmp11
    tmp14 = triton_helpers.maximum(tmp13, tmp1)
    tmp15 = libdevice.pow(tmp14, tmp3)
    tmp16 = tmp12 + tmp15
    tmp17 = 1.0
    tmp18 = tmp16 - tmp17
    tmp20 = tmp19 - tmp17
    tmp21 = tmp18 * tmp20
    tmp22 = tmp21 >= tmp1
    tmp25 = 0.5
    tmp26 = tmp24 * tmp25
    tmp27 = tmp26 * tmp25
    tmp28 = tmp27 * tmp25
    tmp29 = tmp28 * tmp25
    tmp30 = tmp29 * tmp25
    tmp31 = tmp30 * tmp25
    tmp32 = tmp31 * tmp25
    tmp33 = tmp32 * tmp25
    tmp34 = tmp33 * tmp25
    tmp35 = tmp34 * tmp25
    tmp36 = tmp35 * tmp25
    tmp37 = tmp36 * tmp25
    tmp38 = tmp37 * tmp25
    tmp39 = tmp38 * tmp25
    tmp40 = tmp39 * tmp25
    tmp41 = tmp40 * tmp25
    tmp42 = tmp41 * tmp25
    tmp43 = tmp42 * tmp25
    tmp44 = tmp43 * tmp25
    tmp45 = tmp44 * tmp25
    tmp46 = tmp45 * tmp25
    tmp47 = tmp46 * tmp25
    tmp48 = tmp47 * tmp25
    tmp49 = tmp48 * tmp25
    tmp50 = tmp49 * tmp25
    tmp51 = tmp23 + tmp50
    tmp52 = tl.where(tmp22, tmp51, tmp23)
    tl.store(in_out_ptr0 + (x0), tmp52, xmask)
''', device_str='cuda')
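
# [Editor's note] The %unsqueeze_k tensors consumed by each where.self are
# the per-row sign-test masks, unsqueezed so they broadcast against tau along
# the reduced dimension. The probe/update pairs cannot fuse across iterations
# because tau_lo_{k+1} depends on a full row reduction over probe k.
# Editor-added sketch of the masked select (hypothetical shapes):
def _sketch_masked_select(mask, tau_m, tau_lo):
    # mask: (...,) bool from the sign test; tau_m/tau_lo: (..., 1)
    return torch.where(mask.unsqueeze(-1), tau_m, tau_lo)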

# kernel path: runs/run_shard_0/inductor_cache/xp/cxphrrfdmolvfd4qixifhmbpdrlsdb6np4qopmkccumu7xs7u2vv.py
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, sub_129], Original ATen: [aten.sub, aten.mul, aten.div, aten.add]
# Source node to ATen node mapping:
#   X => mul
#   dm_16 => div_15
#   dm_17 => div_16
#   dm_18 => div_17
#   dm_19 => div_18
#   dm_20 => div_19
#   dm_21 => div_20
#   dm_22 => div_21
#   dm_23 => div_22
#   dm_24 => div_23
#   dm_25 => div_24
#   dm_26 => div_25
#   dm_27 => div_26
#   dm_28 => div_27
#   dm_29 => div_28
#   dm_30 => div_29
#   dm_31 => div_30
#   dm_32 => div_31
#   dm_33 => div_32
#   dm_34 => div_33
#   dm_35 => div_34
#   dm_36 => div_35
#   dm_37 => div_36
#   dm_38 => div_37
#   dm_39 => div_38
#   dm_40 => div_39
#   dm_41 => div_40
#   sub => full_default
#   sub_129 => sub_129
#   tau_m_40 => add_40
# Graph fragment:
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
#   %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
#   %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
#   %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
#   %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
#   %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
#   %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
#   %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
#   %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
#   %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
#   %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
#   %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
#   %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
#   %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
#   %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
#   %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
#   %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
#   %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
#   %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
#   %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
#   %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
#   %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
#   %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
#   %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
#   %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
#   %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
#   %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
#   %add_40 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_39, %div_40), kwargs = {})
#   %sub_129 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_40), kwargs = {})
triton_poi_fused_add_div_mul_sub_19 = async_compile.triton('triton_poi_fused_add_div_mul_sub_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_sub_19(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp5 = tmp4 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = tmp6 * tmp1
    tmp8 = tmp7 * tmp1
    tmp9 = tmp8 * tmp1
    tmp10 = tmp9 * tmp1
    tmp11 = tmp10 * tmp1
    tmp12 = tmp11 * tmp1
    tmp13 = tmp12 * tmp1
    tmp14 = tmp13 * tmp1
    tmp15 = tmp14 * tmp1
    tmp16 = tmp15 * tmp1
    tmp17 = tmp16 * tmp1
    tmp18 = tmp17 * tmp1
    tmp19 = tmp18 * tmp1
    tmp20 = tmp19 * tmp1
    tmp21 = tmp20 * tmp1
    tmp22 = tmp21 * tmp1
    tmp23 = tmp22 * tmp1
    tmp24 = tmp23 * tmp1
    tmp25 = tmp24 * tmp1
    tmp26 = tmp25 * tmp1
    tmp27 = tmp26 * tmp1
    tmp28 = tmp27 * tmp1
    tmp29 = tmp28 * tmp1
    tmp30 = tmp29 * tmp1
    tmp31 = tmp3 + tmp30
    tmp32 = tmp2 - tmp31
    tl.store(out_ptr0 + (x2), tmp32, xmask)
''', device_str='cuda')
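
# [Editor's note] From here on, iterations 41-49 of the search fuse into the
# single kernel declared below: once each row reduction happens inside one
# kernel, the intermediate taus stay in registers. The final %pow_53 (the only
# node with num_users=2) and %sum_52 feed the normalized output, i.e.
# plausibly (editor-added sketch, assuming alpha=2 entmax-style semantics):
def _sketch_final_output(X, tau):
    p = torch.clamp(X * 0.5 - tau, min=0) ** 2
    return p / p.sum(dim=-1, keepdim=True)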

# kernel path: runs/run_shard_0/inductor_cache/wa/cwaa5hzx7dhpx3ghsy7xh5ykl5hdag5jzcuoogoqdl4kgv7awvbb.py
# Topologically Sorted Source Nodes: [sub, X, f_lo, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, tau_lo_41, dm_42, tau_m_41, sub_132, clamp_42, truediv_42, p_m_41, sum_43, f_m_41, mul_42, tau_lo_42, dm_43, tau_m_42, sub_135, clamp_43, truediv_43, p_m_42, sum_44, f_m_42, mul_43, tau_lo_43, dm_44, tau_m_43, sub_138, clamp_44, truediv_44, p_m_43, sum_45, f_m_43, tau_lo_44, dm_45, tau_m_44, sub_141, clamp_45, truediv_45, p_m_44, sum_46, tau_lo_45, dm_46, tau_m_45, sub_144, clamp_46, truediv_46, p_m_45, sum_47, tau_lo_46, dm_47, tau_m_46, sub_147, clamp_47, truediv_47, p_m_46, sum_48, tau_lo_47, dm_48, tau_m_47, sub_150, clamp_48, truediv_48, p_m_47, sum_49, tau_lo_48, dm_49, tau_m_48, sub_153, clamp_49, truediv_49, p_m_48, sum_50, tau_lo_49, dm_50, tau_m_49, sub_156, clamp_50, truediv_50, p_m_49, sum_52], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum]
# Source node to ATen node mapping:
#   X => mul
#   clamp_42 => clamp_min_42
#   clamp_43 => clamp_min_43
#   clamp_44 => clamp_min_44
#   clamp_45 => clamp_min_45
#   clamp_46 => clamp_min_46
#   clamp_47 => clamp_min_47
#   clamp_48 => clamp_min_48
#   clamp_49 => clamp_min_49
#   clamp_50 => clamp_min_50
#   dm_16 => div_15
#   dm_17 => div_16
#   dm_18 => div_17
#   dm_19 => div_18
#   dm_20 => div_19
#   dm_21 => div_20
#   dm_22 => div_21
#   dm_23 => div_22
#   dm_24 => div_23
#   dm_25 => div_24
#   dm_26 => div_25
#   dm_27 => div_26
#   dm_28 => div_27
#   dm_29 => div_28
#   dm_30 => div_29
#   dm_31 => div_30
#   dm_32 => div_31
#   dm_33 => div_32
#   dm_34 => div_33
#   dm_35 => div_34
#   dm_36 => div_35
#   dm_37 => div_36
#   dm_38 => div_37
#   dm_39 => div_38
#   dm_40 => div_39
#   dm_41 => div_40
#   dm_42 => div_41
#   dm_43 => div_42
#   dm_44 => div_43
#   dm_45 => div_44
#   dm_46 => div_45
#   dm_47 => div_46
#   dm_48 => div_47
#   dm_49 => div_48
#   dm_50 => div_49
#   f_lo => sub_7
#   f_m_41 => sub_134
#   f_m_42 => sub_137
#   f_m_43 => sub_140
#   mul_42 => mul_85
#   mul_43 => mul_87
#   p_m_41 => pow_45
#   p_m_42 => pow_46
#   p_m_43 => pow_47
#   p_m_44 => pow_48
#   p_m_45 => pow_49
#   p_m_46 => pow_50
#   p_m_47 => pow_51
#   p_m_48 => pow_52
#   p_m_49 => pow_53
#   sub => full_default
#   sub_132 => sub_132
#   sub_135 => sub_135
#   sub_138 => sub_138
#   sub_141 => sub_141
#   sub_144 => sub_144
#   sub_147 => sub_147
#   sub_150 => sub_150
#   sub_153 => sub_153
#   sub_156 => sub_156
#   sum_43 => sum_43
#   sum_44 => sum_44
#   sum_45 => sum_45
#   sum_46 => sum_46
#   sum_47 => sum_47
#   sum_48 => sum_48
#   sum_49 => sum_49
#   sum_50 => sum_50
#   sum_52 => sum_52
#   tau_lo_41 => where_40
#   tau_lo_42 => where_41
#   tau_lo_43 => where_42
#   tau_lo_44 => where_43
#   tau_lo_45 => where_44
#   tau_lo_46 => where_45
#   tau_lo_47 => where_46
#   tau_lo_48 => where_47
#   tau_lo_49 => where_48
#   tau_m_40 => add_40
#   tau_m_41 => add_41
#   tau_m_42 => add_42
#   tau_m_43 => add_43
#   tau_m_44 => add_44
#   tau_m_45 => add_45
#   tau_m_46 => add_46
#   tau_m_47 => add_47
#   tau_m_48 => add_48
#   tau_m_49 => add_49
#   truediv_42 => full_default_45
#   truediv_43 => full_default_46
#   truediv_44 => full_default_47
#   truediv_45 => full_default_48
#   truediv_46 => full_default_49
#   truediv_47 => full_default_50
#   truediv_48 => full_default_51
#   truediv_49 => full_default_52
#   truediv_50 => full_default_53
# Graph fragment:
#   %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
#   %sub_7 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
#   %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
#   %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
#   %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
#   %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
#   %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
#   %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
#   %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
#   %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
#   %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
#   %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
#   %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
#   %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
#   %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
#   %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
#   %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
#   %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
#   %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
#   %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
#   %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
#   %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
#   %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
#   %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
#   %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
#   %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
#   %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
#   %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
#   %add_40 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_39, %div_40), kwargs = {})
#   %where_40 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_40, %add_40, %where_39), kwargs = {})
#   %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {})
#   %add_41 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_40, %div_41), kwargs = {})
#   %sub_132 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_41), kwargs = {})
#   %clamp_min_42 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_132, 0), kwargs = {})
#   %full_default_45 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %pow_45 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_42, %full_default_45), kwargs = {})
#   %sum_43 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_45, [-1]), kwargs = {})
#   %sub_134 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_43, 1), kwargs = {})
#   %mul_85 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_134, %sub_7), kwargs = {})
#   %where_41 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_41, %add_41, %where_40), kwargs = {})
#   %div_42 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_41, 2), kwargs = {})
#   %add_42 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_41, %div_42), kwargs = {})
#   %sub_135 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_42), kwargs = {})
#   %clamp_min_43 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_135, 0), kwargs = {})
#   %full_default_46 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %pow_46 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_43, %full_default_46), kwargs = {})
#   %sum_44 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_46, [-1]), kwargs = {})
#   %sub_137 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_44, 1), kwargs = {})
#   %mul_87 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_137, %sub_7), kwargs = {})
#   %where_42 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_42, %add_42, %where_41), kwargs = {})
#   %div_43 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_42, 2), kwargs = {})
#   %add_43 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_42, %div_43), kwargs = {})
#   %sub_138 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_43), kwargs = {})
#   %clamp_min_44 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_138, 0), kwargs = {})
#   %full_default_47 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %pow_47 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_44, %full_default_47), kwargs = {})
#   %sum_45 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_47, [-1]), kwargs = {})
#   %sub_140 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_45, 1), kwargs = {})
#   %where_43 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_43, %add_43, %where_42), kwargs = {})
#   %div_44 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_43, 2), kwargs = {})
#   %add_44 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_43, %div_44), kwargs = {})
#   %sub_141 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_44), kwargs = {})
#   %clamp_min_45 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_141, 0), kwargs = {})
#   %full_default_48 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %pow_48 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_45, %full_default_48), kwargs = {})
#   %sum_46 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_48, [-1]), kwargs = {})
#   %where_44 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_44, %add_44, %where_43), kwargs = {})
#   %div_45 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_44, 2), kwargs = {})
#   %add_45 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_44, %div_45), kwargs = {})
#   %sub_144 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_45), kwargs = {})
#   %clamp_min_46 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_144, 0), kwargs = {})
#   %full_default_49 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %pow_49 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_46, %full_default_49), kwargs = {})
#   %sum_47 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_49, [-1]), kwargs = {})
#   %where_45 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_45, %add_45, %where_44), kwargs = {})
#   %div_46 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_45, 2), kwargs = {})
#   %add_46 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_45, %div_46), kwargs = {})
#   %sub_147 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_46), kwargs = {})
#   %clamp_min_47 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_147, 0), kwargs = {})
#   %full_default_50 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %pow_50 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_47, %full_default_50), kwargs = {})
#   %sum_48 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_50, [-1]), kwargs = {})
#   %where_46 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_46, %add_46, %where_45), kwargs = {})
#   %div_47 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_46, 2), kwargs = {})
#   %add_47 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_46, %div_47), kwargs = {})
#   %sub_150 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_47), kwargs = {})
#   %clamp_min_48 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_150, 0), kwargs = {})
#   %full_default_51 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %pow_51 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_48, %full_default_51), kwargs = {})
#   %sum_49 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_51, [-1]), kwargs = {})
#   %where_47 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_47, %add_47, %where_46), kwargs = {})
#   %div_48 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_47, 2), kwargs = {})
#   %add_48 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_47, %div_48), kwargs = {})
#   %sub_153 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_48), kwargs = {})
#   %clamp_min_49 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_153, 0), kwargs = {})
#   %full_default_52 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %pow_52 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_49, %full_default_52), kwargs = {})
#   %sum_50 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_52, [-1]), kwargs = {})
#   %where_48 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_48, %add_48, %where_47), kwargs = {})
#   %div_49 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_48, 2), kwargs = {})
#   %add_49 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_48, %div_49), kwargs = {})
#   %sub_156 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_49), kwargs = {})
#   %clamp_min_50 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_156, 0), kwargs = {})
#   %full_default_53 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %pow_53 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_50, %full_default_53), kwargs = {})
#   %sum_52 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_53, [-1]), kwargs = {})
triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20',
''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr4'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20(in_out_ptr0, in_out_ptr4, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr7, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (x0), xmask) tmp23 = tl.load(in_out_ptr0 + (x0), xmask) tmp24 = tl.load(in_ptr2 + (x0), xmask) tmp56 = tl.load(in_ptr3 + (4*x0), xmask, eviction_policy='evict_last') tmp61 = tl.load(in_ptr3 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp67 = tl.load(in_ptr3 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp73 = tl.load(in_ptr3 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 2.0 tmp4 = libdevice.pow(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp5, tmp1) tmp7 = libdevice.pow(tmp6, tmp3) tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp9, tmp1) tmp11 = libdevice.pow(tmp10, tmp3) tmp12 = tmp8 + tmp11 tmp14 = triton_helpers.maximum(tmp13, tmp1) tmp15 = libdevice.pow(tmp14, tmp3) tmp16 = tmp12 + tmp15 tmp17 = 1.0 tmp18 = tmp16 - tmp17 tmp20 = tmp19 - tmp17 tmp21 = tmp18 * tmp20 tmp22 = tmp21 >= tmp1 tmp25 = 0.5 tmp26 = tmp24 * tmp25 tmp27 = tmp26 * tmp25 tmp28 = tmp27 * tmp25 tmp29 = tmp28 * tmp25 tmp30 = tmp29 * tmp25 tmp31 = tmp30 * tmp25 tmp32 = tmp31 * tmp25 tmp33 = tmp32 * tmp25 tmp34 = tmp33 * tmp25 tmp35 = tmp34 * tmp25 tmp36 = tmp35 * tmp25 tmp37 = tmp36 * tmp25 tmp38 = tmp37 * tmp25 tmp39 = tmp38 * tmp25 tmp40 = tmp39 * tmp25 tmp41 = tmp40 * tmp25 tmp42 = tmp41 * tmp25 tmp43 = tmp42 * tmp25 tmp44 = tmp43 * tmp25 tmp45 = tmp44 * tmp25 tmp46 = tmp45 * tmp25 tmp47 = tmp46 * tmp25 tmp48 = tmp47 * tmp25 tmp49 
= tmp48 * tmp25 tmp50 = tmp49 * tmp25 tmp51 = tmp50 * tmp25 tmp52 = tmp23 + tmp51 tmp53 = tl.where(tmp22, tmp52, tmp23) tmp54 = tmp51 * tmp25 tmp55 = tmp53 + tmp54 tmp57 = tmp56 * tmp25 tmp58 = tmp57 - tmp55 tmp59 = triton_helpers.maximum(tmp58, tmp1) tmp60 = libdevice.pow(tmp59, tmp3) tmp62 = tmp61 * tmp25 tmp63 = tmp62 - tmp55 tmp64 = triton_helpers.maximum(tmp63, tmp1) tmp65 = libdevice.pow(tmp64, tmp3) tmp66 = tmp60 + tmp65 tmp68 = tmp67 * tmp25 tmp69 = tmp68 - tmp55 tmp70 = triton_helpers.maximum(tmp69, tmp1) tmp71 = libdevice.pow(tmp70, tmp3) tmp72 = tmp66 + tmp71 tmp74 = tmp73 * tmp25 tmp75 = tmp74 - tmp55 tmp76 = triton_helpers.maximum(tmp75, tmp1) tmp77 = libdevice.pow(tmp76, tmp3) tmp78 = tmp72 + tmp77 tmp79 = tmp78 - tmp17 tmp80 = tmp79 * tmp20 tmp81 = tmp80 >= tmp1 tmp82 = tl.where(tmp81, tmp55, tmp53) tmp83 = tmp54 * tmp25 tmp84 = tmp82 + tmp83 tmp85 = tmp57 - tmp84 tmp86 = triton_helpers.maximum(tmp85, tmp1) tmp87 = libdevice.pow(tmp86, tmp3) tmp88 = tmp62 - tmp84 tmp89 = triton_helpers.maximum(tmp88, tmp1) tmp90 = libdevice.pow(tmp89, tmp3) tmp91 = tmp87 + tmp90 tmp92 = tmp68 - tmp84 tmp93 = triton_helpers.maximum(tmp92, tmp1) tmp94 = libdevice.pow(tmp93, tmp3) tmp95 = tmp91 + tmp94 tmp96 = tmp74 - tmp84 tmp97 = triton_helpers.maximum(tmp96, tmp1) tmp98 = libdevice.pow(tmp97, tmp3) tmp99 = tmp95 + tmp98 tmp100 = tmp99 - tmp17 tmp101 = tmp100 * tmp20 tmp102 = tmp101 >= tmp1 tmp103 = tl.where(tmp102, tmp84, tmp82) tmp104 = tmp83 * tmp25 tmp105 = tmp103 + tmp104 tmp106 = tmp57 - tmp105 tmp107 = triton_helpers.maximum(tmp106, tmp1) tmp108 = libdevice.pow(tmp107, tmp3) tmp109 = tmp62 - tmp105 tmp110 = triton_helpers.maximum(tmp109, tmp1) tmp111 = libdevice.pow(tmp110, tmp3) tmp112 = tmp108 + tmp111 tmp113 = tmp68 - tmp105 tmp114 = triton_helpers.maximum(tmp113, tmp1) tmp115 = libdevice.pow(tmp114, tmp3) tmp116 = tmp112 + tmp115 tmp117 = tmp74 - tmp105 tmp118 = triton_helpers.maximum(tmp117, tmp1) tmp119 = libdevice.pow(tmp118, tmp3) tmp120 = tmp116 + tmp119 tmp121 = tmp120 - tmp17 tmp122 = tmp121 * tmp20 tmp123 = tmp122 >= tmp1 tmp124 = tl.where(tmp123, tmp105, tmp103) tmp125 = tmp104 * tmp25 tmp126 = tmp124 + tmp125 tmp127 = tmp57 - tmp126 tmp128 = triton_helpers.maximum(tmp127, tmp1) tmp129 = libdevice.pow(tmp128, tmp3) tmp130 = tmp62 - tmp126 tmp131 = triton_helpers.maximum(tmp130, tmp1) tmp132 = libdevice.pow(tmp131, tmp3) tmp133 = tmp129 + tmp132 tmp134 = tmp68 - tmp126 tmp135 = triton_helpers.maximum(tmp134, tmp1) tmp136 = libdevice.pow(tmp135, tmp3) tmp137 = tmp133 + tmp136 tmp138 = tmp74 - tmp126 tmp139 = triton_helpers.maximum(tmp138, tmp1) tmp140 = libdevice.pow(tmp139, tmp3) tmp141 = tmp137 + tmp140 tmp142 = tmp141 - tmp17 tmp143 = tmp142 * tmp20 tmp144 = tmp143 >= tmp1 tmp145 = tl.where(tmp144, tmp126, tmp124) tmp146 = tmp125 * tmp25 tmp147 = tmp145 + tmp146 tmp148 = tmp57 - tmp147 tmp149 = triton_helpers.maximum(tmp148, tmp1) tmp150 = libdevice.pow(tmp149, tmp3) tmp151 = tmp62 - tmp147 tmp152 = triton_helpers.maximum(tmp151, tmp1) tmp153 = libdevice.pow(tmp152, tmp3) tmp154 = tmp150 + tmp153 tmp155 = tmp68 - tmp147 tmp156 = triton_helpers.maximum(tmp155, tmp1) tmp157 = libdevice.pow(tmp156, tmp3) tmp158 = tmp154 + tmp157 tmp159 = tmp74 - tmp147 tmp160 = triton_helpers.maximum(tmp159, tmp1) tmp161 = libdevice.pow(tmp160, tmp3) tmp162 = tmp158 + tmp161 tmp163 = tmp162 - tmp17 tmp164 = tmp163 * tmp20 tmp165 = tmp164 >= tmp1 tmp166 = tl.where(tmp165, tmp147, tmp145) tmp167 = tmp146 * tmp25 tmp168 = tmp166 + tmp167 tmp169 = tmp57 - tmp168 tmp170 = 
triton_helpers.maximum(tmp169, tmp1) tmp171 = libdevice.pow(tmp170, tmp3) tmp172 = tmp62 - tmp168 tmp173 = triton_helpers.maximum(tmp172, tmp1) tmp174 = libdevice.pow(tmp173, tmp3) tmp175 = tmp171 + tmp174 tmp176 = tmp68 - tmp168 tmp177 = triton_helpers.maximum(tmp176, tmp1) tmp178 = libdevice.pow(tmp177, tmp3) tmp179 = tmp175 + tmp178 tmp180 = tmp74 - tmp168 tmp181 = triton_helpers.maximum(tmp180, tmp1) tmp182 = libdevice.pow(tmp181, tmp3) tmp183 = tmp179 + tmp182 tmp184 = tmp183 - tmp17 tmp185 = tmp184 * tmp20 tmp186 = tmp185 >= tmp1 tmp187 = tl.where(tmp186, tmp168, tmp166) tmp188 = tmp167 * tmp25 tmp189 = tmp187 + tmp188 tmp190 = tmp57 - tmp189 tmp191 = triton_helpers.maximum(tmp190, tmp1) tmp192 = libdevice.pow(tmp191, tmp3) tmp193 = tmp62 - tmp189 tmp194 = triton_helpers.maximum(tmp193, tmp1) tmp195 = libdevice.pow(tmp194, tmp3) tmp196 = tmp192 + tmp195 tmp197 = tmp68 - tmp189 tmp198 = triton_helpers.maximum(tmp197, tmp1) tmp199 = libdevice.pow(tmp198, tmp3) tmp200 = tmp196 + tmp199 tmp201 = tmp74 - tmp189 tmp202 = triton_helpers.maximum(tmp201, tmp1) tmp203 = libdevice.pow(tmp202, tmp3) tmp204 = tmp200 + tmp203 tmp205 = tmp204 - tmp17 tmp206 = tmp205 * tmp20 tmp207 = tmp206 >= tmp1 tmp208 = tl.where(tmp207, tmp189, tmp187) tmp209 = tmp188 * tmp25 tmp210 = tmp208 + tmp209 tmp211 = tmp57 - tmp210 tmp212 = triton_helpers.maximum(tmp211, tmp1) tmp213 = libdevice.pow(tmp212, tmp3) tmp214 = tmp62 - tmp210 tmp215 = triton_helpers.maximum(tmp214, tmp1) tmp216 = libdevice.pow(tmp215, tmp3) tmp217 = tmp213 + tmp216 tmp218 = tmp68 - tmp210 tmp219 = triton_helpers.maximum(tmp218, tmp1) tmp220 = libdevice.pow(tmp219, tmp3) tmp221 = tmp217 + tmp220 tmp222 = tmp74 - tmp210 tmp223 = triton_helpers.maximum(tmp222, tmp1) tmp224 = libdevice.pow(tmp223, tmp3) tmp225 = tmp221 + tmp224 tmp226 = tmp225 - tmp17 tmp227 = tmp226 * tmp20 tmp228 = tmp227 >= tmp1 tmp229 = tl.where(tmp228, tmp210, tmp208) tmp230 = tmp209 * tmp25 tmp231 = tmp229 + tmp230 tmp232 = tmp57 - tmp231 tmp233 = triton_helpers.maximum(tmp232, tmp1) tmp234 = libdevice.pow(tmp233, tmp3) tmp235 = tmp62 - tmp231 tmp236 = triton_helpers.maximum(tmp235, tmp1) tmp237 = libdevice.pow(tmp236, tmp3) tmp238 = tmp234 + tmp237 tmp239 = tmp68 - tmp231 tmp240 = triton_helpers.maximum(tmp239, tmp1) tmp241 = libdevice.pow(tmp240, tmp3) tmp242 = tmp238 + tmp241 tmp243 = tmp74 - tmp231 tmp244 = triton_helpers.maximum(tmp243, tmp1) tmp245 = libdevice.pow(tmp244, tmp3) tmp246 = tmp242 + tmp245 tl.store(in_out_ptr4 + (x0), tmp231, xmask) tl.store(out_ptr7 + (x0), tmp246, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/pm/cpmji3aw76p4a3ywro7icjwdbgchdizzkcjfhno53435xsk6mwfd.py # Topologically Sorted Source Nodes: [sub, X, dm_45, dm_46, dm_47, dm_48, tau_m_47, tau_lo_48, dm_49, tau_m_48, tau_lo_49, dm_50, tau_m_49, sub_156, clamp_50, truediv_50, p_m_49, p_m_50], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow] # Source node to ATen node mapping: # X => mul # clamp_50 => clamp_min_50 # dm_45 => div_44 # dm_46 => div_45 # dm_47 => div_46 # dm_48 => div_47 # dm_49 => div_48 # dm_50 => div_49 # p_m_49 => pow_53 # p_m_50 => div_50 # sub => full_default # sub_156 => sub_156 # tau_lo_48 => where_47 # tau_lo_49 => where_48 # tau_m_47 => add_47 # tau_m_48 => add_48 # tau_m_49 => add_49 # truediv_50 => full_default_53 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, 
device: cuda:0, pin_memory: False}) # %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %div_44 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_43, 2), kwargs = {}) # %div_45 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_44, 2), kwargs = {}) # %div_46 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_45, 2), kwargs = {}) # %div_47 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_46, 2), kwargs = {}) # %add_47 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_46, %div_47), kwargs = {}) # %where_47 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_47, %add_47, %where_46), kwargs = {}) # %div_48 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_47, 2), kwargs = {}) # %add_48 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_47, %div_48), kwargs = {}) # %where_48 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_48, %add_48, %where_47), kwargs = {}) # %div_49 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_48, 2), kwargs = {}) # %add_49 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_48, %div_49), kwargs = {}) # %sub_156 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_49), kwargs = {}) # %clamp_min_50 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_156, 0), kwargs = {}) # %full_default_53 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %pow_53 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_50, %full_default_53), kwargs = {}) # %div_50 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_53, %unsqueeze_50), kwargs = {}) triton_poi_fused_add_clamp_div_mul_pow_sub_where_21 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_where_21', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_where_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': 
True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_where_21(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 - tmp3 tmp5 = 0.0 tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = 2.0 tmp8 = libdevice.pow(tmp6, tmp7) tmp10 = tmp8 / tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf35 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf51 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf52 = reinterpret_tensor(buf51, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf51 # reuse buf53 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, X, max_1, pow_2, tau_hi, pow_1, tau_lo, dm, dm_1, tau_m, sub_9, clamp_1, truediv_1, p_m, sum_2, sub_5, clamp, truediv, pow_3, sum_1, f_lo, tau_lo_1, dm_2, tau_m_1, sub_12, clamp_2, truediv_2, p_m_1, sum_3, f_m_1, mul_2, tau_lo_2, dm_3, tau_m_2, sub_15, clamp_3, truediv_3, p_m_2, sum_4, f_m_2, mul_3, tau_lo_3, dm_4, tau_m_3, sub_18, clamp_4, truediv_4, p_m_3, sum_5, f_m_3, mul_4, tau_lo_4, dm_5, tau_m_4, sub_21, clamp_5, truediv_5, p_m_4, sum_6, f_m_4, mul_5, tau_lo_5, dm_6, tau_m_5, sub_24, clamp_6, truediv_6, p_m_5, sum_7, f_m_5, mul_6, tau_lo_6, dm_7, tau_m_6, sub_27, clamp_7, truediv_7, p_m_6, sum_8, f_m_6, mul_7, tau_lo_7, dm_8, tau_m_7, sub_30, clamp_8, truediv_8, p_m_7, sum_9, f_m_7, mul_8, tau_lo_8, dm_9, tau_m_8, sub_33, clamp_9, truediv_9, p_m_8, sum_10, f_m_8, mul_9, tau_lo_9, dm_10, tau_m_9, sub_36, clamp_10, truediv_10, p_m_9, sum_11, f_m_9, mul_10, tau_lo_10, dm_11, tau_m_10, sub_39, clamp_11, truediv_11, p_m_10, sum_12, f_m_10, mul_11, tau_lo_11, dm_12, tau_m_11, sub_42, clamp_12, truediv_12, p_m_11, sum_13, f_m_11, mul_12, tau_lo_12, dm_13, tau_m_12, sub_45, clamp_13, truediv_13, p_m_12, sum_14, f_m_12, mul_13, tau_lo_13, dm_14, tau_m_13, sub_48, clamp_14, truediv_14, p_m_13, sum_15, f_m_13, mul_14, tau_lo_14, dm_15, tau_m_14, sub_51, clamp_15, truediv_15, p_m_14, sum_16, tau_lo_15, dm_16, tau_m_15, sub_54, clamp_16, truediv_16, p_m_15, sum_17, f_m_15, tau_lo_16, dm_17, tau_m_16, sub_57, clamp_17, truediv_17, p_m_16, sum_18, tau_lo_17, dm_18, tau_m_17, sub_60, clamp_18, truediv_18, p_m_17, sum_19, tau_lo_18, dm_19, tau_m_18, sub_63, clamp_19, truediv_19, p_m_18, sum_20, tau_lo_19, dm_20, tau_m_19, sub_66, clamp_20, truediv_20, p_m_19, sum_21, tau_lo_20, dm_21, tau_m_20, sub_69, clamp_21, truediv_21, p_m_20, sum_22, tau_lo_21, dm_22, tau_m_21, sub_72, clamp_22, truediv_22, p_m_21, sum_23, tau_lo_22, dm_23, tau_m_22, sub_75, clamp_23, truediv_23, p_m_22, sum_24, tau_lo_23, dm_24, tau_m_23, sub_78, clamp_24, truediv_24, p_m_23, sum_25, tau_lo_24, dm_25, tau_m_24, sub_81, clamp_25, truediv_25, p_m_24, sum_26, tau_lo_25, dm_26, tau_m_25, sub_84, clamp_26, 
truediv_26, p_m_25, sum_27], Original ATen: [aten.sub, aten.mul, aten.max, aten.pow, aten.div, aten.add, aten.clamp, aten.sum, aten.where] stream0 = get_raw_stream(0) triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0.run(buf52, arg0_1, buf1, buf35, buf53, 64, grid=grid(64), stream=stream0) buf54 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, tau_m_25, tau_lo_26, dm_27, tau_m_26, sub_87, clamp_27, truediv_27, p_m_26], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow] triton_poi_fused_add_clamp_div_mul_pow_sub_where_1.run(arg0_1, buf53, buf1, buf52, buf35, buf54, 256, grid=grid(256), stream=stream0) buf55 = buf52; del buf52 # reuse buf56 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, tau_m_25, tau_lo_26, dm_27, tau_m_26, tau_lo_27, dm_28, tau_m_27, sub_90, clamp_28, truediv_28, p_m_27, sum_29], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum] triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2.run(buf55, buf54, buf1, buf53, buf35, arg0_1, buf56, 64, grid=grid(64), stream=stream0) buf57 = buf54; del buf54 # reuse # Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, tau_m_27, tau_lo_28, dm_29, tau_m_28, sub_93], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where] triton_poi_fused_add_div_mul_sub_where_3.run(arg0_1, buf56, buf1, buf55, buf35, buf57, 256, grid=grid(256), stream=stream0) buf58 = buf55; del buf55 # reuse buf59 = buf53; del buf53 # reuse # Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, tau_m_27, tau_lo_28, dm_29, tau_m_28, tau_lo_29, dm_30, tau_m_29, sub_96, clamp_30, truediv_30, p_m_29, sum_31], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum] triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4.run(buf58, buf57, buf1, buf56, buf35, arg0_1, buf59, 64, grid=grid(64), stream=stream0) buf60 = buf57; del buf57 # reuse # Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, tau_m_29, tau_lo_30, dm_31, tau_m_30, sub_99], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where] triton_poi_fused_add_div_mul_sub_where_5.run(arg0_1, buf59, buf1, buf58, buf35, buf60, 256, grid=grid(256), stream=stream0) buf61 = buf58; del buf58 # reuse buf66 = buf56; del buf56 # reuse buf67 = reinterpret_tensor(buf66, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf66 # reuse # Topologically Sorted Source Nodes: [sub, X, f_lo, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, tau_m_29, tau_lo_30, dm_31, tau_m_30, tau_lo_31, dm_32, tau_m_31, sub_102, clamp_32, truediv_32, p_m_31, sum_33, tau_lo_32, dm_33, tau_m_32, sub_105, clamp_33, truediv_33, p_m_32, sum_34, f_m_32, mul_33, tau_lo_33, dm_34, tau_m_33, sub_108, clamp_34, truediv_34, p_m_33, sum_35, tau_lo_34], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum] triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6.run(buf61, buf67, buf60, buf1, 
buf59, buf35, arg0_1, 64, grid=grid(64), stream=stream0) buf68 = buf60; del buf60 # reuse # Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, tau_m_34, sub_111, clamp_35, truediv_35, p_m_34], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp, aten.pow] triton_poi_fused_add_clamp_div_mul_pow_sub_7.run(arg0_1, buf67, buf35, buf68, 256, grid=grid(256), stream=stream0) buf69 = buf67; del buf67 # reuse # Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, tau_m_34, tau_lo_35], Original ATen: [aten.div, aten.add, aten.where] triton_poi_fused_add_div_where_8.run(buf69, buf68, buf1, buf35, 64, grid=grid(64), stream=stream0) buf70 = buf68; del buf68 # reuse # Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, tau_m_35, sub_114, clamp_36, truediv_36, p_m_35], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp, aten.pow] triton_poi_fused_add_clamp_div_mul_pow_sub_9.run(arg0_1, buf69, buf35, buf70, 256, grid=grid(256), stream=stream0) buf71 = buf69; del buf69 # reuse # Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, tau_m_35, tau_lo_36], Original ATen: [aten.div, aten.add, aten.where] triton_poi_fused_add_div_where_10.run(buf71, buf70, buf1, buf35, 64, grid=grid(64), stream=stream0) buf72 = buf70; del buf70 # reuse # Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, tau_m_36, sub_117, clamp_37], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp] triton_poi_fused_add_clamp_div_mul_sub_11.run(arg0_1, buf71, buf35, buf72, 256, grid=grid(256), stream=stream0) buf73 = buf71; del buf71 # reuse # Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, tau_m_36, tau_lo_37], Original ATen: [aten.div, aten.add, aten.where] triton_poi_fused_add_div_where_12.run(buf73, buf72, buf1, buf35, 64, grid=grid(64), stream=stream0) buf74 = buf72; del buf72 # reuse # Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, tau_m_37, sub_120, clamp_38], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp] triton_poi_fused_add_clamp_div_mul_sub_13.run(arg0_1, buf73, buf35, buf74, 256, grid=grid(256), stream=stream0) buf75 = buf73; del buf73 # reuse # Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, tau_m_37, tau_lo_38], Original ATen: [aten.div, aten.add, aten.where] triton_poi_fused_add_div_where_14.run(buf75, buf74, buf1, buf35, 64, grid=grid(64), stream=stream0) buf76 = buf74; del buf74 # reuse # Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, 
dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, tau_m_38, sub_123], Original ATen: [aten.sub, aten.mul, aten.div, aten.add] triton_poi_fused_add_div_mul_sub_15.run(arg0_1, buf75, buf35, buf76, 256, grid=grid(256), stream=stream0) buf77 = buf75; del buf75 # reuse # Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, tau_m_38, tau_lo_39], Original ATen: [aten.div, aten.add, aten.where] triton_poi_fused_add_div_where_16.run(buf77, buf76, buf1, buf35, 64, grid=grid(64), stream=stream0) buf78 = buf76; del buf76 # reuse # Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, tau_m_39, sub_126], Original ATen: [aten.sub, aten.mul, aten.div, aten.add] triton_poi_fused_add_div_mul_sub_17.run(arg0_1, buf77, buf35, buf78, 256, grid=grid(256), stream=stream0) buf79 = buf77; del buf77 # reuse # Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, tau_m_39, tau_lo_40], Original ATen: [aten.div, aten.add, aten.where] triton_poi_fused_add_div_where_18.run(buf79, buf78, buf1, buf35, 64, grid=grid(64), stream=stream0) buf80 = buf78; del buf78 # reuse # Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, sub_129], Original ATen: [aten.sub, aten.mul, aten.div, aten.add] triton_poi_fused_add_div_mul_sub_19.run(arg0_1, buf79, buf35, buf80, 256, grid=grid(256), stream=stream0) buf81 = buf79; del buf79 # reuse buf95 = reinterpret_tensor(buf61, (4, 4, 4), (16, 4, 1), 0); del buf61 # reuse buf96 = reinterpret_tensor(buf95, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf95 # reuse buf97 = buf59; del buf59 # reuse # Topologically Sorted Source Nodes: [sub, X, f_lo, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, tau_lo_41, dm_42, tau_m_41, sub_132, clamp_42, truediv_42, p_m_41, sum_43, f_m_41, mul_42, tau_lo_42, dm_43, tau_m_42, sub_135, clamp_43, truediv_43, p_m_42, sum_44, f_m_42, mul_43, tau_lo_43, dm_44, tau_m_43, sub_138, clamp_44, truediv_44, p_m_43, sum_45, f_m_43, tau_lo_44, dm_45, tau_m_44, sub_141, clamp_45, truediv_45, p_m_44, sum_46, tau_lo_45, dm_46, tau_m_45, sub_144, clamp_46, truediv_46, p_m_45, sum_47, tau_lo_46, dm_47, tau_m_46, sub_147, clamp_47, truediv_47, p_m_46, sum_48, tau_lo_47, dm_48, tau_m_47, sub_150, clamp_48, truediv_48, p_m_47, sum_49, tau_lo_48, dm_49, tau_m_48, sub_153, clamp_49, truediv_49, p_m_48, sum_50, tau_lo_49, dm_50, tau_m_49, sub_156, clamp_50, truediv_50, p_m_49, sum_52], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum] triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20.run(buf81, buf96, buf80, buf1, buf35, arg0_1, buf97, 64, grid=grid(64), stream=stream0) del buf1 del buf35 del buf81 buf98 = buf80; del buf80 # reuse # Topologically Sorted Source 
Nodes: [sub, X, dm_45, dm_46, dm_47, dm_48, tau_m_47, tau_lo_48, dm_49, tau_m_48, tau_lo_49, dm_50, tau_m_49, sub_156, clamp_50, truediv_50, p_m_49, p_m_50], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow] triton_poi_fused_add_clamp_div_mul_pow_sub_where_21.run(arg0_1, buf96, buf97, buf98, 256, grid=grid(256), stream=stream0) del arg0_1 del buf96 del buf97 return (buf98, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
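
# A minimal smoke test -- a sketch added for illustration, not part of the
# generated artifact. It assumes only what `call` itself asserts: one
# contiguous (4, 4, 4, 4) float32 CUDA tensor in, a one-tuple out. The rows
# of the result should lie on the probability simplex, matching the eager
# `entmax_bisect` reference reproduced further below.
def smoke_test_compiled_entmax():
    if not torch.cuda.is_available():
        return
    x = torch.rand((4, 4, 4, 4), device='cuda')
    # `call` clears the argument list it is handed, so build a fresh one.
    p, = call([x])
    assert p.shape == x.shape
    assert (p >= 0).all()
    # every slice along the last dim sums to 1 (the ensure_sum_one=True path)
    assert torch.allclose(p.sum(-1), torch.ones_like(p.sum(-1)), atol=1e-5)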
from torch.autograd import Function
import torch
import torch.nn as nn


def entmax_bisect(X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True):
    """alpha-entmax: normalizing sparse transform (a la softmax).

    Solves the optimization problem:

        max_p <x, p> - H_a(p)    s.t.    p >= 0, sum(p) == 1.

    where H_a(p) is the Tsallis alpha-entropy with custom alpha >= 1,
    using a bisection (root finding, binary search) algorithm.

    This function is differentiable with respect to both X and alpha.

    Parameters
    ----------
    X : torch.Tensor
        The input tensor.
    alpha : float or torch.Tensor
        Tensor of alpha parameters (> 1) to use. If scalar or python float,
        the same value is used for all rows; otherwise, it must have shape
        (or be expandable to)
        alpha.shape[j] == (X.shape[j] if j != dim else 1).
        A value of alpha=2 corresponds to sparsemax, and alpha=1 corresponds
        to softmax (but computing it this way is likely unstable).
    dim : int
        The dimension along which to apply alpha-entmax.
    n_iter : int
        Number of bisection iterations. For float32, 24 iterations should
        suffice for machine precision.
    ensure_sum_one : bool
        Whether to divide the result by its sum. If false, the result might
        sum to close to, but not exactly, 1, which might cause downstream
        problems.

    Returns
    -------
    P : torch.Tensor, same shape as X
        The projection result, such that P.sum(dim=dim) == 1 elementwise.
    """
    return EntmaxBisectFunction.apply(X, alpha, dim, n_iter, ensure_sum_one)


class EntmaxBisectFunction(Function):

    @classmethod
    def _gp(cls, x, alpha):
        return x ** (alpha - 1)

    @classmethod
    def _gp_inv(cls, y, alpha):
        return y ** (1 / (alpha - 1))

    @classmethod
    def _p(cls, X, alpha):
        return cls._gp_inv(torch.clamp(X, min=0), alpha)

    @classmethod
    def forward(cls, ctx, X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True):
        if not isinstance(alpha, torch.Tensor):
            alpha = torch.tensor(alpha, dtype=X.dtype, device=X.device)
        alpha_shape = list(X.shape)
        alpha_shape[dim] = 1
        alpha = alpha.expand(*alpha_shape)
        ctx.alpha = alpha
        ctx.dim = dim
        d = X.shape[dim]
        X = X * (alpha - 1)
        max_val, _ = X.max(dim=dim, keepdim=True)
        tau_lo = max_val - cls._gp(1, alpha)
        tau_hi = max_val - cls._gp(1 / d, alpha)
        f_lo = cls._p(X - tau_lo, alpha).sum(dim) - 1
        dm = tau_hi - tau_lo
        for it in range(n_iter):
            dm /= 2
            tau_m = tau_lo + dm
            p_m = cls._p(X - tau_m, alpha)
            f_m = p_m.sum(dim) - 1
            mask = (f_m * f_lo >= 0).unsqueeze(dim)
            tau_lo = torch.where(mask, tau_m, tau_lo)
        if ensure_sum_one:
            p_m /= p_m.sum(dim=dim).unsqueeze(dim=dim)
        ctx.save_for_backward(p_m)
        return p_m

    @classmethod
    def backward(cls, ctx, dY):
        Y, = ctx.saved_tensors
        gppr = torch.where(Y > 0, Y ** (2 - ctx.alpha), Y.new_zeros(1))
        dX = dY * gppr
        q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
        q = q.unsqueeze(ctx.dim)
        dX -= q * gppr
        d_alpha = None
        if ctx.needs_input_grad[1]:
            S = torch.where(Y > 0, Y * torch.log(Y), Y.new_zeros(1))
            ent = S.sum(ctx.dim).unsqueeze(ctx.dim)
            Y_skewed = gppr / gppr.sum(ctx.dim).unsqueeze(ctx.dim)
            d_alpha = dY * (Y - Y_skewed) / (ctx.alpha - 1) ** 2
            d_alpha -= dY * (S - Y_skewed * ent) / (ctx.alpha - 1)
            d_alpha = d_alpha.sum(ctx.dim).unsqueeze(ctx.dim)
        return dX, d_alpha, None, None, None


class EntmaxBisect(nn.Module):

    def __init__(self, alpha=1.5, dim=-1, n_iter=50):
        """alpha-entmax: normalizing sparse map (a la softmax) via bisection.

        Solves the optimization problem:

            max_p <x, p> - H_a(p)    s.t.    p >= 0, sum(p) == 1.

        where H_a(p) is the Tsallis alpha-entropy with custom alpha >= 1,
        using a bisection (root finding, binary search) algorithm.

        Parameters
        ----------
        alpha : float or torch.Tensor
            Tensor of alpha parameters (> 1) to use. If scalar or python
            float, the same value is used for all rows; otherwise, it must
            have shape (or be expandable to)
            alpha.shape[j] == (X.shape[j] if j != dim else 1).
            A value of alpha=2 corresponds to sparsemax; alpha=1 corresponds
            to softmax (but computing it this way is likely unstable).
        dim : int
            The dimension along which to apply alpha-entmax.
        n_iter : int
            Number of bisection iterations. For float32, 24 iterations should
            suffice for machine precision.
        """
        super().__init__()
        self.dim = dim
        self.n_iter = n_iter
        self.alpha = alpha

    def forward(self, X):
        return entmax_bisect(X, alpha=self.alpha, dim=self.dim,
            n_iter=self.n_iter)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
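
# Sanity-check sketch (added for illustration, not from the original source):
# per the docstring, alpha=2 should reproduce sparsemax exactly, because
# p = clamp(X - tau, 0) ** (1 / (alpha - 1)) degenerates to a plain clamp.
# `sparsemax_reference` is a hypothetical helper implementing the exact
# sort-based simplex projection of Martins & Astudillo (2016).
def sparsemax_reference(x, dim=-1):
    srt, _ = torch.sort(x, dim=dim, descending=True)
    cssv = srt.cumsum(dim) - 1.0  # cumulative sums, shifted by the target 1
    k = torch.arange(1, x.shape[dim] + 1, device=x.device, dtype=x.dtype)
    shape = [1] * x.dim()
    shape[dim] = -1
    k = k.view(shape)
    support = (srt * k > cssv).sum(dim=dim, keepdim=True)  # support size
    tau = cssv.gather(dim, support - 1) / support.to(x.dtype)
    return torch.clamp(x - tau, min=0.0)


def check_alpha2_matches_sparsemax():
    x = torch.randn(4, 4, 4, 4)
    p_bisect = entmax_bisect(x, alpha=2.0, dim=-1)
    p_exact = sparsemax_reference(x, dim=-1)
    # 50 halvings shrink the bisection bracket by 2**50, so agreement is tight
    assert torch.allclose(p_bisect, p_exact, atol=1e-5)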
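
# Reading aid for the generated kernels above and below (a sketch, not part
# of the source): for the default alpha = 1.5 the compiler constant-folds
# (alpha - 1) to 0.5 and 1 / (alpha - 1) to 2.0, which is why every fused
# kernel multiplies the input by 0.5, clamps at zero, and squares. In eager
# terms, the final kernel (`..._21`) applies just this epilogue with the
# converged per-row threshold tau (shape of x with size 1 along `dim`):
def entmax15_epilogue(x, tau, dim=-1):
    p = torch.clamp(0.5 * x - tau, min=0.0) ** 2
    return p / p.sum(dim=dim, keepdim=True)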
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch.autograd import Function import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0(in_out_ptr12, in_ptr0, out_ptr0, out_ptr19, out_ptr27, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = triton_helpers.maximum(tmp2, tmp4) tmp7 = tmp6 * tmp1 tmp8 = triton_helpers.maximum(tmp5, tmp7) tmp10 = tmp9 * tmp1 tmp11 = triton_helpers.maximum(tmp8, tmp10) tmp12 = 1.0 tmp13 = tmp11 - tmp12 tmp14 = tmp11 - tmp1 tmp15 = tmp14 - tmp13 tmp16 = tmp15 * tmp1 tmp17 = tmp13 + tmp16 tmp18 = tmp2 - tmp17 tmp19 = 0.0 tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = 2.0 tmp22 = libdevice.pow(tmp20, tmp21) tmp23 = tmp4 - tmp17 tmp24 = triton_helpers.maximum(tmp23, tmp19) tmp25 = libdevice.pow(tmp24, tmp21) tmp26 = tmp22 + tmp25 tmp27 = tmp7 - tmp17 tmp28 = triton_helpers.maximum(tmp27, tmp19) tmp29 = libdevice.pow(tmp28, tmp21) tmp30 = tmp26 + tmp29 tmp31 = tmp10 - tmp17 tmp32 = triton_helpers.maximum(tmp31, tmp19) tmp33 = libdevice.pow(tmp32, tmp21) tmp34 = tmp30 + tmp33 tmp35 = tmp2 - tmp13 tmp36 = triton_helpers.maximum(tmp35, tmp19) tmp37 = libdevice.pow(tmp36, tmp21) tmp38 = tmp4 - tmp13 tmp39 = triton_helpers.maximum(tmp38, tmp19) tmp40 = libdevice.pow(tmp39, tmp21) tmp41 = tmp37 + tmp40 tmp42 = tmp7 - tmp13 tmp43 = triton_helpers.maximum(tmp42, tmp19) tmp44 = libdevice.pow(tmp43, tmp21) tmp45 = tmp41 + tmp44 tmp46 = tmp10 - tmp13 tmp47 = triton_helpers.maximum(tmp46, tmp19) tmp48 = libdevice.pow(tmp47, tmp21) tmp49 = tmp45 + tmp48 tmp50 = tmp34 - tmp12 tmp51 = tmp49 - tmp12 tmp52 = tmp50 * tmp51 tmp53 = tmp52 >= tmp19 tmp54 = tl.where(tmp53, tmp17, tmp13) tmp55 = tmp16 * tmp1 tmp56 = tmp54 + tmp55 tmp57 = tmp2 - tmp56 tmp58 = triton_helpers.maximum(tmp57, tmp19) tmp59 = libdevice.pow(tmp58, tmp21) tmp60 = tmp4 - tmp56 tmp61 = triton_helpers.maximum(tmp60, tmp19) tmp62 = libdevice.pow(tmp61, tmp21) tmp63 = tmp59 + tmp62 tmp64 = tmp7 - tmp56 tmp65 = triton_helpers.maximum(tmp64, tmp19) tmp66 = libdevice.pow(tmp65, tmp21) tmp67 = tmp63 + tmp66 tmp68 = tmp10 - tmp56 tmp69 = triton_helpers.maximum(tmp68, tmp19) tmp70 = libdevice.pow(tmp69, tmp21) tmp71 = tmp67 + tmp70 tmp72 = tmp71 - tmp12 tmp73 = tmp72 * tmp51 tmp74 = tmp73 >= tmp19 tmp75 = tl.where(tmp74, tmp56, tmp54) tmp76 = tmp55 * tmp1 tmp77 = tmp75 + tmp76 tmp78 = tmp2 - tmp77 tmp79 = triton_helpers.maximum(tmp78, tmp19) tmp80 = libdevice.pow(tmp79, tmp21) tmp81 = tmp4 - tmp77 tmp82 = triton_helpers.maximum(tmp81, tmp19) tmp83 = libdevice.pow(tmp82, tmp21) tmp84 = tmp80 + tmp83 tmp85 = tmp7 - tmp77 tmp86 = triton_helpers.maximum(tmp85, tmp19) tmp87 = libdevice.pow(tmp86, 
tmp21) tmp88 = tmp84 + tmp87 tmp89 = tmp10 - tmp77 tmp90 = triton_helpers.maximum(tmp89, tmp19) tmp91 = libdevice.pow(tmp90, tmp21) tmp92 = tmp88 + tmp91 tmp93 = tmp92 - tmp12 tmp94 = tmp93 * tmp51 tmp95 = tmp94 >= tmp19 tmp96 = tl.where(tmp95, tmp77, tmp75) tmp97 = tmp76 * tmp1 tmp98 = tmp96 + tmp97 tmp99 = tmp2 - tmp98 tmp100 = triton_helpers.maximum(tmp99, tmp19) tmp101 = libdevice.pow(tmp100, tmp21) tmp102 = tmp4 - tmp98 tmp103 = triton_helpers.maximum(tmp102, tmp19) tmp104 = libdevice.pow(tmp103, tmp21) tmp105 = tmp101 + tmp104 tmp106 = tmp7 - tmp98 tmp107 = triton_helpers.maximum(tmp106, tmp19) tmp108 = libdevice.pow(tmp107, tmp21) tmp109 = tmp105 + tmp108 tmp110 = tmp10 - tmp98 tmp111 = triton_helpers.maximum(tmp110, tmp19) tmp112 = libdevice.pow(tmp111, tmp21) tmp113 = tmp109 + tmp112 tmp114 = tmp113 - tmp12 tmp115 = tmp114 * tmp51 tmp116 = tmp115 >= tmp19 tmp117 = tl.where(tmp116, tmp98, tmp96) tmp118 = tmp97 * tmp1 tmp119 = tmp117 + tmp118 tmp120 = tmp2 - tmp119 tmp121 = triton_helpers.maximum(tmp120, tmp19) tmp122 = libdevice.pow(tmp121, tmp21) tmp123 = tmp4 - tmp119 tmp124 = triton_helpers.maximum(tmp123, tmp19) tmp125 = libdevice.pow(tmp124, tmp21) tmp126 = tmp122 + tmp125 tmp127 = tmp7 - tmp119 tmp128 = triton_helpers.maximum(tmp127, tmp19) tmp129 = libdevice.pow(tmp128, tmp21) tmp130 = tmp126 + tmp129 tmp131 = tmp10 - tmp119 tmp132 = triton_helpers.maximum(tmp131, tmp19) tmp133 = libdevice.pow(tmp132, tmp21) tmp134 = tmp130 + tmp133 tmp135 = tmp134 - tmp12 tmp136 = tmp135 * tmp51 tmp137 = tmp136 >= tmp19 tmp138 = tl.where(tmp137, tmp119, tmp117) tmp139 = tmp118 * tmp1 tmp140 = tmp138 + tmp139 tmp141 = tmp2 - tmp140 tmp142 = triton_helpers.maximum(tmp141, tmp19) tmp143 = libdevice.pow(tmp142, tmp21) tmp144 = tmp4 - tmp140 tmp145 = triton_helpers.maximum(tmp144, tmp19) tmp146 = libdevice.pow(tmp145, tmp21) tmp147 = tmp143 + tmp146 tmp148 = tmp7 - tmp140 tmp149 = triton_helpers.maximum(tmp148, tmp19) tmp150 = libdevice.pow(tmp149, tmp21) tmp151 = tmp147 + tmp150 tmp152 = tmp10 - tmp140 tmp153 = triton_helpers.maximum(tmp152, tmp19) tmp154 = libdevice.pow(tmp153, tmp21) tmp155 = tmp151 + tmp154 tmp156 = tmp155 - tmp12 tmp157 = tmp156 * tmp51 tmp158 = tmp157 >= tmp19 tmp159 = tl.where(tmp158, tmp140, tmp138) tmp160 = tmp139 * tmp1 tmp161 = tmp159 + tmp160 tmp162 = tmp2 - tmp161 tmp163 = triton_helpers.maximum(tmp162, tmp19) tmp164 = libdevice.pow(tmp163, tmp21) tmp165 = tmp4 - tmp161 tmp166 = triton_helpers.maximum(tmp165, tmp19) tmp167 = libdevice.pow(tmp166, tmp21) tmp168 = tmp164 + tmp167 tmp169 = tmp7 - tmp161 tmp170 = triton_helpers.maximum(tmp169, tmp19) tmp171 = libdevice.pow(tmp170, tmp21) tmp172 = tmp168 + tmp171 tmp173 = tmp10 - tmp161 tmp174 = triton_helpers.maximum(tmp173, tmp19) tmp175 = libdevice.pow(tmp174, tmp21) tmp176 = tmp172 + tmp175 tmp177 = tmp176 - tmp12 tmp178 = tmp177 * tmp51 tmp179 = tmp178 >= tmp19 tmp180 = tl.where(tmp179, tmp161, tmp159) tmp181 = tmp160 * tmp1 tmp182 = tmp180 + tmp181 tmp183 = tmp2 - tmp182 tmp184 = triton_helpers.maximum(tmp183, tmp19) tmp185 = libdevice.pow(tmp184, tmp21) tmp186 = tmp4 - tmp182 tmp187 = triton_helpers.maximum(tmp186, tmp19) tmp188 = libdevice.pow(tmp187, tmp21) tmp189 = tmp185 + tmp188 tmp190 = tmp7 - tmp182 tmp191 = triton_helpers.maximum(tmp190, tmp19) tmp192 = libdevice.pow(tmp191, tmp21) tmp193 = tmp189 + tmp192 tmp194 = tmp10 - tmp182 tmp195 = triton_helpers.maximum(tmp194, tmp19) tmp196 = libdevice.pow(tmp195, tmp21) tmp197 = tmp193 + tmp196 tmp198 = tmp197 - tmp12 tmp199 = tmp198 * tmp51 tmp200 = tmp199 >= 
tmp19 tmp201 = tl.where(tmp200, tmp182, tmp180) tmp202 = tmp181 * tmp1 tmp203 = tmp201 + tmp202 tmp204 = tmp2 - tmp203 tmp205 = triton_helpers.maximum(tmp204, tmp19) tmp206 = libdevice.pow(tmp205, tmp21) tmp207 = tmp4 - tmp203 tmp208 = triton_helpers.maximum(tmp207, tmp19) tmp209 = libdevice.pow(tmp208, tmp21) tmp210 = tmp206 + tmp209 tmp211 = tmp7 - tmp203 tmp212 = triton_helpers.maximum(tmp211, tmp19) tmp213 = libdevice.pow(tmp212, tmp21) tmp214 = tmp210 + tmp213 tmp215 = tmp10 - tmp203 tmp216 = triton_helpers.maximum(tmp215, tmp19) tmp217 = libdevice.pow(tmp216, tmp21) tmp218 = tmp214 + tmp217 tmp219 = tmp218 - tmp12 tmp220 = tmp219 * tmp51 tmp221 = tmp220 >= tmp19 tmp222 = tl.where(tmp221, tmp203, tmp201) tmp223 = tmp202 * tmp1 tmp224 = tmp222 + tmp223 tmp225 = tmp2 - tmp224 tmp226 = triton_helpers.maximum(tmp225, tmp19) tmp227 = libdevice.pow(tmp226, tmp21) tmp228 = tmp4 - tmp224 tmp229 = triton_helpers.maximum(tmp228, tmp19) tmp230 = libdevice.pow(tmp229, tmp21) tmp231 = tmp227 + tmp230 tmp232 = tmp7 - tmp224 tmp233 = triton_helpers.maximum(tmp232, tmp19) tmp234 = libdevice.pow(tmp233, tmp21) tmp235 = tmp231 + tmp234 tmp236 = tmp10 - tmp224 tmp237 = triton_helpers.maximum(tmp236, tmp19) tmp238 = libdevice.pow(tmp237, tmp21) tmp239 = tmp235 + tmp238 tmp240 = tmp239 - tmp12 tmp241 = tmp240 * tmp51 tmp242 = tmp241 >= tmp19 tmp243 = tl.where(tmp242, tmp224, tmp222) tmp244 = tmp223 * tmp1 tmp245 = tmp243 + tmp244 tmp246 = tmp2 - tmp245 tmp247 = triton_helpers.maximum(tmp246, tmp19) tmp248 = libdevice.pow(tmp247, tmp21) tmp249 = tmp4 - tmp245 tmp250 = triton_helpers.maximum(tmp249, tmp19) tmp251 = libdevice.pow(tmp250, tmp21) tmp252 = tmp248 + tmp251 tmp253 = tmp7 - tmp245 tmp254 = triton_helpers.maximum(tmp253, tmp19) tmp255 = libdevice.pow(tmp254, tmp21) tmp256 = tmp252 + tmp255 tmp257 = tmp10 - tmp245 tmp258 = triton_helpers.maximum(tmp257, tmp19) tmp259 = libdevice.pow(tmp258, tmp21) tmp260 = tmp256 + tmp259 tmp261 = tmp260 - tmp12 tmp262 = tmp261 * tmp51 tmp263 = tmp262 >= tmp19 tmp264 = tl.where(tmp263, tmp245, tmp243) tmp265 = tmp244 * tmp1 tmp266 = tmp264 + tmp265 tmp267 = tmp2 - tmp266 tmp268 = triton_helpers.maximum(tmp267, tmp19) tmp269 = libdevice.pow(tmp268, tmp21) tmp270 = tmp4 - tmp266 tmp271 = triton_helpers.maximum(tmp270, tmp19) tmp272 = libdevice.pow(tmp271, tmp21) tmp273 = tmp269 + tmp272 tmp274 = tmp7 - tmp266 tmp275 = triton_helpers.maximum(tmp274, tmp19) tmp276 = libdevice.pow(tmp275, tmp21) tmp277 = tmp273 + tmp276 tmp278 = tmp10 - tmp266 tmp279 = triton_helpers.maximum(tmp278, tmp19) tmp280 = libdevice.pow(tmp279, tmp21) tmp281 = tmp277 + tmp280 tmp282 = tmp281 - tmp12 tmp283 = tmp282 * tmp51 tmp284 = tmp283 >= tmp19 tmp285 = tl.where(tmp284, tmp266, tmp264) tmp286 = tmp265 * tmp1 tmp287 = tmp285 + tmp286 tmp288 = tmp2 - tmp287 tmp289 = triton_helpers.maximum(tmp288, tmp19) tmp290 = libdevice.pow(tmp289, tmp21) tmp291 = tmp4 - tmp287 tmp292 = triton_helpers.maximum(tmp291, tmp19) tmp293 = libdevice.pow(tmp292, tmp21) tmp294 = tmp290 + tmp293 tmp295 = tmp7 - tmp287 tmp296 = triton_helpers.maximum(tmp295, tmp19) tmp297 = libdevice.pow(tmp296, tmp21) tmp298 = tmp294 + tmp297 tmp299 = tmp10 - tmp287 tmp300 = triton_helpers.maximum(tmp299, tmp19) tmp301 = libdevice.pow(tmp300, tmp21) tmp302 = tmp298 + tmp301 tmp303 = tmp302 - tmp12 tmp304 = tmp303 * tmp51 tmp305 = tmp304 >= tmp19 tmp306 = tl.where(tmp305, tmp287, tmp285) tmp307 = tmp286 * tmp1 tmp308 = tmp307 * tmp1 tmp309 = tmp306 + tmp307 tmp310 = tmp2 - tmp309 tmp311 = triton_helpers.maximum(tmp310, tmp19) tmp312 = 
libdevice.pow(tmp311, tmp21) tmp313 = tmp4 - tmp309 tmp314 = triton_helpers.maximum(tmp313, tmp19) tmp315 = libdevice.pow(tmp314, tmp21) tmp316 = tmp312 + tmp315 tmp317 = tmp7 - tmp309 tmp318 = triton_helpers.maximum(tmp317, tmp19) tmp319 = libdevice.pow(tmp318, tmp21) tmp320 = tmp316 + tmp319 tmp321 = tmp10 - tmp309 tmp322 = triton_helpers.maximum(tmp321, tmp19) tmp323 = libdevice.pow(tmp322, tmp21) tmp324 = tmp320 + tmp323 tmp325 = tmp324 - tmp12 tmp326 = tmp325 * tmp51 tmp327 = tmp326 >= tmp19 tmp328 = tl.where(tmp327, tmp309, tmp306) tmp329 = tmp328 + tmp308 tmp330 = tmp2 - tmp329 tmp331 = triton_helpers.maximum(tmp330, tmp19) tmp332 = libdevice.pow(tmp331, tmp21) tmp333 = tmp4 - tmp329 tmp334 = triton_helpers.maximum(tmp333, tmp19) tmp335 = libdevice.pow(tmp334, tmp21) tmp336 = tmp332 + tmp335 tmp337 = tmp7 - tmp329 tmp338 = triton_helpers.maximum(tmp337, tmp19) tmp339 = libdevice.pow(tmp338, tmp21) tmp340 = tmp336 + tmp339 tmp341 = tmp10 - tmp329 tmp342 = triton_helpers.maximum(tmp341, tmp19) tmp343 = libdevice.pow(tmp342, tmp21) tmp344 = tmp340 + tmp343 tmp345 = tmp344 - tmp12 tmp346 = tmp345 * tmp51 tmp347 = tmp346 >= tmp19 tmp348 = tl.where(tmp347, tmp329, tmp328) tmp349 = tmp308 * tmp1 tmp350 = tmp348 + tmp349 tmp351 = tmp2 - tmp350 tmp352 = triton_helpers.maximum(tmp351, tmp19) tmp353 = libdevice.pow(tmp352, tmp21) tmp354 = tmp4 - tmp350 tmp355 = triton_helpers.maximum(tmp354, tmp19) tmp356 = libdevice.pow(tmp355, tmp21) tmp357 = tmp353 + tmp356 tmp358 = tmp7 - tmp350 tmp359 = triton_helpers.maximum(tmp358, tmp19) tmp360 = libdevice.pow(tmp359, tmp21) tmp361 = tmp357 + tmp360 tmp362 = tmp10 - tmp350 tmp363 = triton_helpers.maximum(tmp362, tmp19) tmp364 = libdevice.pow(tmp363, tmp21) tmp365 = tmp361 + tmp364 tmp366 = tmp365 - tmp12 tmp367 = tmp366 * tmp51 tmp368 = tmp367 >= tmp19 tmp369 = tl.where(tmp368, tmp350, tmp348) tmp370 = tmp349 * tmp1 tmp371 = tmp369 + tmp370 tmp372 = tmp2 - tmp371 tmp373 = triton_helpers.maximum(tmp372, tmp19) tmp374 = libdevice.pow(tmp373, tmp21) tmp375 = tmp4 - tmp371 tmp376 = triton_helpers.maximum(tmp375, tmp19) tmp377 = libdevice.pow(tmp376, tmp21) tmp378 = tmp374 + tmp377 tmp379 = tmp7 - tmp371 tmp380 = triton_helpers.maximum(tmp379, tmp19) tmp381 = libdevice.pow(tmp380, tmp21) tmp382 = tmp378 + tmp381 tmp383 = tmp10 - tmp371 tmp384 = triton_helpers.maximum(tmp383, tmp19) tmp385 = libdevice.pow(tmp384, tmp21) tmp386 = tmp382 + tmp385 tmp387 = tmp386 - tmp12 tmp388 = tmp387 * tmp51 tmp389 = tmp388 >= tmp19 tmp390 = tl.where(tmp389, tmp371, tmp369) tmp391 = tmp370 * tmp1 tmp392 = tmp390 + tmp391 tmp393 = tmp2 - tmp392 tmp394 = triton_helpers.maximum(tmp393, tmp19) tmp395 = libdevice.pow(tmp394, tmp21) tmp396 = tmp4 - tmp392 tmp397 = triton_helpers.maximum(tmp396, tmp19) tmp398 = libdevice.pow(tmp397, tmp21) tmp399 = tmp395 + tmp398 tmp400 = tmp7 - tmp392 tmp401 = triton_helpers.maximum(tmp400, tmp19) tmp402 = libdevice.pow(tmp401, tmp21) tmp403 = tmp399 + tmp402 tmp404 = tmp10 - tmp392 tmp405 = triton_helpers.maximum(tmp404, tmp19) tmp406 = libdevice.pow(tmp405, tmp21) tmp407 = tmp403 + tmp406 tmp408 = tmp407 - tmp12 tmp409 = tmp408 * tmp51 tmp410 = tmp409 >= tmp19 tmp411 = tl.where(tmp410, tmp392, tmp390) tmp412 = tmp391 * tmp1 tmp413 = tmp411 + tmp412 tmp414 = tmp2 - tmp413 tmp415 = triton_helpers.maximum(tmp414, tmp19) tmp416 = libdevice.pow(tmp415, tmp21) tmp417 = tmp4 - tmp413 tmp418 = triton_helpers.maximum(tmp417, tmp19) tmp419 = libdevice.pow(tmp418, tmp21) tmp420 = tmp416 + tmp419 tmp421 = tmp7 - tmp413 tmp422 = 
triton_helpers.maximum(tmp421, tmp19) tmp423 = libdevice.pow(tmp422, tmp21) tmp424 = tmp420 + tmp423 tmp425 = tmp10 - tmp413 tmp426 = triton_helpers.maximum(tmp425, tmp19) tmp427 = libdevice.pow(tmp426, tmp21) tmp428 = tmp424 + tmp427 tmp429 = tmp428 - tmp12 tmp430 = tmp429 * tmp51 tmp431 = tmp430 >= tmp19 tmp432 = tl.where(tmp431, tmp413, tmp411) tmp433 = tmp412 * tmp1 tmp434 = tmp432 + tmp433 tmp435 = tmp2 - tmp434 tmp436 = triton_helpers.maximum(tmp435, tmp19) tmp437 = libdevice.pow(tmp436, tmp21) tmp438 = tmp4 - tmp434 tmp439 = triton_helpers.maximum(tmp438, tmp19) tmp440 = libdevice.pow(tmp439, tmp21) tmp441 = tmp437 + tmp440 tmp442 = tmp7 - tmp434 tmp443 = triton_helpers.maximum(tmp442, tmp19) tmp444 = libdevice.pow(tmp443, tmp21) tmp445 = tmp441 + tmp444 tmp446 = tmp10 - tmp434 tmp447 = triton_helpers.maximum(tmp446, tmp19) tmp448 = libdevice.pow(tmp447, tmp21) tmp449 = tmp445 + tmp448 tmp450 = tmp449 - tmp12 tmp451 = tmp450 * tmp51 tmp452 = tmp451 >= tmp19 tmp453 = tl.where(tmp452, tmp434, tmp432) tmp454 = tmp433 * tmp1 tmp455 = tmp453 + tmp454 tmp456 = tmp2 - tmp455 tmp457 = triton_helpers.maximum(tmp456, tmp19) tmp458 = libdevice.pow(tmp457, tmp21) tmp459 = tmp4 - tmp455 tmp460 = triton_helpers.maximum(tmp459, tmp19) tmp461 = libdevice.pow(tmp460, tmp21) tmp462 = tmp458 + tmp461 tmp463 = tmp7 - tmp455 tmp464 = triton_helpers.maximum(tmp463, tmp19) tmp465 = libdevice.pow(tmp464, tmp21) tmp466 = tmp462 + tmp465 tmp467 = tmp10 - tmp455 tmp468 = triton_helpers.maximum(tmp467, tmp19) tmp469 = libdevice.pow(tmp468, tmp21) tmp470 = tmp466 + tmp469 tmp471 = tmp470 - tmp12 tmp472 = tmp471 * tmp51 tmp473 = tmp472 >= tmp19 tmp474 = tl.where(tmp473, tmp455, tmp453) tmp475 = tmp454 * tmp1 tmp476 = tmp474 + tmp475 tmp477 = tmp2 - tmp476 tmp478 = triton_helpers.maximum(tmp477, tmp19) tmp479 = libdevice.pow(tmp478, tmp21) tmp480 = tmp4 - tmp476 tmp481 = triton_helpers.maximum(tmp480, tmp19) tmp482 = libdevice.pow(tmp481, tmp21) tmp483 = tmp479 + tmp482 tmp484 = tmp7 - tmp476 tmp485 = triton_helpers.maximum(tmp484, tmp19) tmp486 = libdevice.pow(tmp485, tmp21) tmp487 = tmp483 + tmp486 tmp488 = tmp10 - tmp476 tmp489 = triton_helpers.maximum(tmp488, tmp19) tmp490 = libdevice.pow(tmp489, tmp21) tmp491 = tmp487 + tmp490 tmp492 = tmp491 - tmp12 tmp493 = tmp492 * tmp51 tmp494 = tmp493 >= tmp19 tmp495 = tl.where(tmp494, tmp476, tmp474) tmp496 = tmp475 * tmp1 tmp497 = tmp495 + tmp496 tmp498 = tmp2 - tmp497 tmp499 = triton_helpers.maximum(tmp498, tmp19) tmp500 = libdevice.pow(tmp499, tmp21) tmp501 = tmp4 - tmp497 tmp502 = triton_helpers.maximum(tmp501, tmp19) tmp503 = libdevice.pow(tmp502, tmp21) tmp504 = tmp500 + tmp503 tmp505 = tmp7 - tmp497 tmp506 = triton_helpers.maximum(tmp505, tmp19) tmp507 = libdevice.pow(tmp506, tmp21) tmp508 = tmp504 + tmp507 tmp509 = tmp10 - tmp497 tmp510 = triton_helpers.maximum(tmp509, tmp19) tmp511 = libdevice.pow(tmp510, tmp21) tmp512 = tmp508 + tmp511 tmp513 = tmp512 - tmp12 tmp514 = tmp513 * tmp51 tmp515 = tmp514 >= tmp19 tmp516 = tl.where(tmp515, tmp497, tmp495) tmp517 = tmp496 * tmp1 tmp518 = tmp516 + tmp517 tmp519 = tmp2 - tmp518 tmp520 = triton_helpers.maximum(tmp519, tmp19) tmp521 = libdevice.pow(tmp520, tmp21) tmp522 = tmp4 - tmp518 tmp523 = triton_helpers.maximum(tmp522, tmp19) tmp524 = libdevice.pow(tmp523, tmp21) tmp525 = tmp521 + tmp524 tmp526 = tmp7 - tmp518 tmp527 = triton_helpers.maximum(tmp526, tmp19) tmp528 = libdevice.pow(tmp527, tmp21) tmp529 = tmp525 + tmp528 tmp530 = tmp10 - tmp518 tmp531 = triton_helpers.maximum(tmp530, tmp19) tmp532 = 
libdevice.pow(tmp531, tmp21) tmp533 = tmp529 + tmp532 tmp534 = tmp533 - tmp12 tmp535 = tmp534 * tmp51 tmp536 = tmp535 >= tmp19 tmp537 = tl.where(tmp536, tmp518, tmp516) tmp538 = tmp517 * tmp1 tmp539 = tmp537 + tmp538 tmp540 = tmp2 - tmp539 tmp541 = triton_helpers.maximum(tmp540, tmp19) tmp542 = libdevice.pow(tmp541, tmp21) tmp543 = tmp4 - tmp539 tmp544 = triton_helpers.maximum(tmp543, tmp19) tmp545 = libdevice.pow(tmp544, tmp21) tmp546 = tmp542 + tmp545 tmp547 = tmp7 - tmp539 tmp548 = triton_helpers.maximum(tmp547, tmp19) tmp549 = libdevice.pow(tmp548, tmp21) tmp550 = tmp546 + tmp549 tmp551 = tmp10 - tmp539 tmp552 = triton_helpers.maximum(tmp551, tmp19) tmp553 = libdevice.pow(tmp552, tmp21) tmp554 = tmp550 + tmp553 tmp555 = tmp554 - tmp12 tmp556 = tmp555 * tmp51 tmp557 = tmp556 >= tmp19 tmp558 = tl.where(tmp557, tmp539, tmp537) tmp559 = tmp538 * tmp1 tmp560 = tmp558 + tmp559 tmp561 = tmp2 - tmp560 tmp562 = triton_helpers.maximum(tmp561, tmp19) tmp563 = libdevice.pow(tmp562, tmp21) tmp564 = tmp4 - tmp560 tmp565 = triton_helpers.maximum(tmp564, tmp19) tmp566 = libdevice.pow(tmp565, tmp21) tmp567 = tmp563 + tmp566 tmp568 = tmp7 - tmp560 tmp569 = triton_helpers.maximum(tmp568, tmp19) tmp570 = libdevice.pow(tmp569, tmp21) tmp571 = tmp567 + tmp570 tmp572 = tmp10 - tmp560 tmp573 = triton_helpers.maximum(tmp572, tmp19) tmp574 = libdevice.pow(tmp573, tmp21) tmp575 = tmp571 + tmp574 tl.store(out_ptr0 + x0, tmp49, xmask) tl.store(out_ptr19 + x0, tmp308, xmask) tl.store(in_out_ptr12 + x0, tmp558, xmask) tl.store(out_ptr27 + x0, tmp575, xmask) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_where_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = 1.0 tmp5 = tmp3 - tmp4 tmp7 = tmp6 - tmp4 tmp8 = tmp5 * tmp7 tmp9 = 0.0 tmp10 = tmp8 >= tmp9 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp11 + tmp23 tmp25 = tl.where(tmp10, tmp24, tmp11) tmp26 = tmp23 * tmp1 tmp27 = tmp25 + tmp26 tmp28 = tmp2 - tmp27 tmp29 = triton_helpers.maximum(tmp28, tmp9) tmp30 = 2.0 tmp31 = libdevice.pow(tmp29, tmp30) tl.store(out_ptr0 + x2, tmp31, xmask) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + x0, xmask) tmp14 = tl.load(in_ptr2 + x0, xmask) tmp18 = tl.load(in_out_ptr0 + x0, xmask) tmp19 = tl.load(in_ptr3 + x0, xmask) tmp37 = tl.load(in_ptr4 + 4 * x0, 
xmask, eviction_policy='evict_last') tmp45 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp51 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp57 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 1.0 tmp8 = tmp6 - tmp7 tmp10 = tmp9 - tmp7 tmp11 = tmp8 * tmp10 tmp12 = 0.0 tmp13 = tmp11 >= tmp12 tmp15 = tmp14 - tmp7 tmp16 = tmp15 * tmp10 tmp17 = tmp16 >= tmp12 tmp20 = 0.5 tmp21 = tmp19 * tmp20 tmp22 = tmp21 * tmp20 tmp23 = tmp22 * tmp20 tmp24 = tmp23 * tmp20 tmp25 = tmp24 * tmp20 tmp26 = tmp25 * tmp20 tmp27 = tmp26 * tmp20 tmp28 = tmp27 * tmp20 tmp29 = tmp28 * tmp20 tmp30 = tmp29 * tmp20 tmp31 = tmp30 * tmp20 tmp32 = tmp18 + tmp31 tmp33 = tl.where(tmp17, tmp32, tmp18) tmp34 = tmp31 * tmp20 tmp35 = tmp33 + tmp34 tmp36 = tl.where(tmp13, tmp35, tmp33) tmp38 = tmp37 * tmp20 tmp39 = tmp34 * tmp20 tmp40 = tmp36 + tmp39 tmp41 = tmp38 - tmp40 tmp42 = triton_helpers.maximum(tmp41, tmp12) tmp43 = 2.0 tmp44 = libdevice.pow(tmp42, tmp43) tmp46 = tmp45 * tmp20 tmp47 = tmp46 - tmp40 tmp48 = triton_helpers.maximum(tmp47, tmp12) tmp49 = libdevice.pow(tmp48, tmp43) tmp50 = tmp44 + tmp49 tmp52 = tmp51 * tmp20 tmp53 = tmp52 - tmp40 tmp54 = triton_helpers.maximum(tmp53, tmp12) tmp55 = libdevice.pow(tmp54, tmp43) tmp56 = tmp50 + tmp55 tmp58 = tmp57 * tmp20 tmp59 = tmp58 - tmp40 tmp60 = triton_helpers.maximum(tmp59, tmp12) tmp61 = libdevice.pow(tmp60, tmp43) tmp62 = tmp56 + tmp61 tl.store(in_out_ptr0 + x0, tmp36, xmask) tl.store(out_ptr0 + x0, tmp62, xmask) @triton.jit def triton_poi_fused_add_div_mul_sub_where_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = 1.0 tmp5 = tmp3 - tmp4 tmp7 = tmp6 - tmp4 tmp8 = tmp5 * tmp7 tmp9 = 0.0 tmp10 = tmp8 >= tmp9 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp11 + tmp25 tmp27 = tl.where(tmp10, tmp26, tmp11) tmp28 = tmp25 * tmp1 tmp29 = tmp27 + tmp28 tmp30 = tmp2 - tmp29 tl.store(out_ptr0 + x2, tmp30, xmask) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + x0, xmask) tmp23 = tl.load(in_ptr2 + x0, xmask) tmp27 = tl.load(in_out_ptr0 + x0, xmask) tmp28 = tl.load(in_ptr3 + x0, xmask) tmp48 = tl.load(in_ptr4 + 4 * x0, xmask, 
eviction_policy='evict_last') tmp55 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp61 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp67 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 2.0 tmp4 = libdevice.pow(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp5, tmp1) tmp7 = libdevice.pow(tmp6, tmp3) tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp9, tmp1) tmp11 = libdevice.pow(tmp10, tmp3) tmp12 = tmp8 + tmp11 tmp14 = triton_helpers.maximum(tmp13, tmp1) tmp15 = libdevice.pow(tmp14, tmp3) tmp16 = tmp12 + tmp15 tmp17 = 1.0 tmp18 = tmp16 - tmp17 tmp20 = tmp19 - tmp17 tmp21 = tmp18 * tmp20 tmp22 = tmp21 >= tmp1 tmp24 = tmp23 - tmp17 tmp25 = tmp24 * tmp20 tmp26 = tmp25 >= tmp1 tmp29 = 0.5 tmp30 = tmp28 * tmp29 tmp31 = tmp30 * tmp29 tmp32 = tmp31 * tmp29 tmp33 = tmp32 * tmp29 tmp34 = tmp33 * tmp29 tmp35 = tmp34 * tmp29 tmp36 = tmp35 * tmp29 tmp37 = tmp36 * tmp29 tmp38 = tmp37 * tmp29 tmp39 = tmp38 * tmp29 tmp40 = tmp39 * tmp29 tmp41 = tmp40 * tmp29 tmp42 = tmp41 * tmp29 tmp43 = tmp27 + tmp42 tmp44 = tl.where(tmp26, tmp43, tmp27) tmp45 = tmp42 * tmp29 tmp46 = tmp44 + tmp45 tmp47 = tl.where(tmp22, tmp46, tmp44) tmp49 = tmp48 * tmp29 tmp50 = tmp45 * tmp29 tmp51 = tmp47 + tmp50 tmp52 = tmp49 - tmp51 tmp53 = triton_helpers.maximum(tmp52, tmp1) tmp54 = libdevice.pow(tmp53, tmp3) tmp56 = tmp55 * tmp29 tmp57 = tmp56 - tmp51 tmp58 = triton_helpers.maximum(tmp57, tmp1) tmp59 = libdevice.pow(tmp58, tmp3) tmp60 = tmp54 + tmp59 tmp62 = tmp61 * tmp29 tmp63 = tmp62 - tmp51 tmp64 = triton_helpers.maximum(tmp63, tmp1) tmp65 = libdevice.pow(tmp64, tmp3) tmp66 = tmp60 + tmp65 tmp68 = tmp67 * tmp29 tmp69 = tmp68 - tmp51 tmp70 = triton_helpers.maximum(tmp69, tmp1) tmp71 = libdevice.pow(tmp70, tmp3) tmp72 = tmp66 + tmp71 tl.store(in_out_ptr0 + x0, tmp47, xmask) tl.store(out_ptr0 + x0, tmp72, xmask) @triton.jit def triton_poi_fused_add_div_mul_sub_where_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = 1.0 tmp5 = tmp3 - tmp4 tmp7 = tmp6 - tmp4 tmp8 = tmp5 * tmp7 tmp9 = 0.0 tmp10 = tmp8 >= tmp9 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp25 * tmp1 tmp27 = tmp26 * tmp1 tmp28 = tmp11 + tmp27 tmp29 = tl.where(tmp10, tmp28, tmp11) tmp30 = tmp27 * tmp1 tmp31 = tmp29 + tmp30 tmp32 = tmp2 - tmp31 tl.store(out_ptr0 + x2, tmp32, xmask) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6(in_out_ptr0, in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, 
eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + x0, xmask) tmp23 = tl.load(in_ptr2 + x0, xmask) tmp27 = tl.load(in_out_ptr0 + x0, xmask) tmp28 = tl.load(in_ptr3 + x0, xmask) tmp50 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last') tmp57 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp63 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp69 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 2.0 tmp4 = libdevice.pow(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp5, tmp1) tmp7 = libdevice.pow(tmp6, tmp3) tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp9, tmp1) tmp11 = libdevice.pow(tmp10, tmp3) tmp12 = tmp8 + tmp11 tmp14 = triton_helpers.maximum(tmp13, tmp1) tmp15 = libdevice.pow(tmp14, tmp3) tmp16 = tmp12 + tmp15 tmp17 = 1.0 tmp18 = tmp16 - tmp17 tmp20 = tmp19 - tmp17 tmp21 = tmp18 * tmp20 tmp22 = tmp21 >= tmp1 tmp24 = tmp23 - tmp17 tmp25 = tmp24 * tmp20 tmp26 = tmp25 >= tmp1 tmp29 = 0.5 tmp30 = tmp28 * tmp29 tmp31 = tmp30 * tmp29 tmp32 = tmp31 * tmp29 tmp33 = tmp32 * tmp29 tmp34 = tmp33 * tmp29 tmp35 = tmp34 * tmp29 tmp36 = tmp35 * tmp29 tmp37 = tmp36 * tmp29 tmp38 = tmp37 * tmp29 tmp39 = tmp38 * tmp29 tmp40 = tmp39 * tmp29 tmp41 = tmp40 * tmp29 tmp42 = tmp41 * tmp29 tmp43 = tmp42 * tmp29 tmp44 = tmp43 * tmp29 tmp45 = tmp27 + tmp44 tmp46 = tl.where(tmp26, tmp45, tmp27) tmp47 = tmp44 * tmp29 tmp48 = tmp46 + tmp47 tmp49 = tl.where(tmp22, tmp48, tmp46) tmp51 = tmp50 * tmp29 tmp52 = tmp47 * tmp29 tmp53 = tmp49 + tmp52 tmp54 = tmp51 - tmp53 tmp55 = triton_helpers.maximum(tmp54, tmp1) tmp56 = libdevice.pow(tmp55, tmp3) tmp58 = tmp57 * tmp29 tmp59 = tmp58 - tmp53 tmp60 = triton_helpers.maximum(tmp59, tmp1) tmp61 = libdevice.pow(tmp60, tmp3) tmp62 = tmp56 + tmp61 tmp64 = tmp63 * tmp29 tmp65 = tmp64 - tmp53 tmp66 = triton_helpers.maximum(tmp65, tmp1) tmp67 = libdevice.pow(tmp66, tmp3) tmp68 = tmp62 + tmp67 tmp70 = tmp69 * tmp29 tmp71 = tmp70 - tmp53 tmp72 = triton_helpers.maximum(tmp71, tmp1) tmp73 = libdevice.pow(tmp72, tmp3) tmp74 = tmp68 + tmp73 tmp75 = tmp74 - tmp17 tmp76 = tmp75 * tmp20 tmp77 = tmp76 >= tmp1 tmp78 = tl.where(tmp77, tmp53, tmp49) tmp79 = tmp52 * tmp29 tmp80 = tmp78 + tmp79 tmp81 = tmp51 - tmp80 tmp82 = triton_helpers.maximum(tmp81, tmp1) tmp83 = libdevice.pow(tmp82, tmp3) tmp84 = tmp58 - tmp80 tmp85 = triton_helpers.maximum(tmp84, tmp1) tmp86 = libdevice.pow(tmp85, tmp3) tmp87 = tmp83 + tmp86 tmp88 = tmp64 - tmp80 tmp89 = triton_helpers.maximum(tmp88, tmp1) tmp90 = libdevice.pow(tmp89, tmp3) tmp91 = tmp87 + tmp90 tmp92 = tmp70 - tmp80 tmp93 = triton_helpers.maximum(tmp92, tmp1) tmp94 = libdevice.pow(tmp93, tmp3) tmp95 = tmp91 + tmp94 tmp96 = tmp95 - tmp17 tmp97 = tmp96 * tmp20 tmp98 = tmp97 >= tmp1 tmp99 = tl.where(tmp98, tmp80, tmp78) tmp100 = tmp79 * tmp29 tmp101 = tmp99 + tmp100 tmp102 = tmp51 - tmp101 tmp103 = triton_helpers.maximum(tmp102, tmp1) tmp104 = libdevice.pow(tmp103, tmp3) tmp105 = tmp58 - tmp101 tmp106 = triton_helpers.maximum(tmp105, tmp1) tmp107 = libdevice.pow(tmp106, tmp3) tmp108 = tmp104 + tmp107 tmp109 = tmp64 - tmp101 tmp110 = triton_helpers.maximum(tmp109, tmp1) tmp111 = libdevice.pow(tmp110, tmp3) tmp112 = tmp108 + tmp111 tmp113 = tmp70 - tmp101 tmp114 = triton_helpers.maximum(tmp113, tmp1) tmp115 = libdevice.pow(tmp114, tmp3) tmp116 = tmp112 + 
tmp115 tmp117 = tmp116 - tmp17 tmp118 = tmp117 * tmp20 tmp119 = tmp118 >= tmp1 tmp120 = tl.where(tmp119, tmp101, tmp99) tl.store(in_out_ptr2 + x0, tmp120, xmask) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp3 + tmp24 tmp26 = tmp2 - tmp25 tmp27 = 0.0 tmp28 = triton_helpers.maximum(tmp26, tmp27) tmp29 = 2.0 tmp30 = libdevice.pow(tmp28, tmp29) tl.store(out_ptr0 + x2, tmp30, xmask) @triton.jit def triton_poi_fused_add_div_where_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + x0, xmask) tmp14 = tl.load(in_out_ptr0 + x0, xmask) tmp15 = tl.load(in_ptr2 + x0, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 1.0 tmp8 = tmp6 - tmp7 tmp10 = tmp9 - tmp7 tmp11 = tmp8 * tmp10 tmp12 = 0.0 tmp13 = tmp11 >= tmp12 tmp16 = 0.5 tmp17 = tmp15 * tmp16 tmp18 = tmp17 * tmp16 tmp19 = tmp18 * tmp16 tmp20 = tmp19 * tmp16 tmp21 = tmp20 * tmp16 tmp22 = tmp21 * tmp16 tmp23 = tmp22 * tmp16 tmp24 = tmp23 * tmp16 tmp25 = tmp24 * tmp16 tmp26 = tmp25 * tmp16 tmp27 = tmp26 * tmp16 tmp28 = tmp27 * tmp16 tmp29 = tmp28 * tmp16 tmp30 = tmp29 * tmp16 tmp31 = tmp30 * tmp16 tmp32 = tmp31 * tmp16 tmp33 = tmp32 * tmp16 tmp34 = tmp33 * tmp16 tmp35 = tmp34 * tmp16 tmp36 = tmp35 * tmp16 tmp37 = tmp14 + tmp36 tmp38 = tl.where(tmp13, tmp37, tmp14) tl.store(in_out_ptr0 + x0, tmp38, xmask) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp3 + tmp25 tmp27 = tmp2 - tmp26 tmp28 = 
0.0 tmp29 = triton_helpers.maximum(tmp27, tmp28) tmp30 = 2.0 tmp31 = libdevice.pow(tmp29, tmp30) tl.store(out_ptr0 + x2, tmp31, xmask) @triton.jit def triton_poi_fused_add_div_where_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + x0, xmask) tmp14 = tl.load(in_out_ptr0 + x0, xmask) tmp15 = tl.load(in_ptr2 + x0, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 1.0 tmp8 = tmp6 - tmp7 tmp10 = tmp9 - tmp7 tmp11 = tmp8 * tmp10 tmp12 = 0.0 tmp13 = tmp11 >= tmp12 tmp16 = 0.5 tmp17 = tmp15 * tmp16 tmp18 = tmp17 * tmp16 tmp19 = tmp18 * tmp16 tmp20 = tmp19 * tmp16 tmp21 = tmp20 * tmp16 tmp22 = tmp21 * tmp16 tmp23 = tmp22 * tmp16 tmp24 = tmp23 * tmp16 tmp25 = tmp24 * tmp16 tmp26 = tmp25 * tmp16 tmp27 = tmp26 * tmp16 tmp28 = tmp27 * tmp16 tmp29 = tmp28 * tmp16 tmp30 = tmp29 * tmp16 tmp31 = tmp30 * tmp16 tmp32 = tmp31 * tmp16 tmp33 = tmp32 * tmp16 tmp34 = tmp33 * tmp16 tmp35 = tmp34 * tmp16 tmp36 = tmp35 * tmp16 tmp37 = tmp36 * tmp16 tmp38 = tmp14 + tmp37 tmp39 = tl.where(tmp13, tmp38, tmp14) tl.store(in_out_ptr0 + x0, tmp39, xmask) @triton.jit def triton_poi_fused_add_clamp_div_mul_sub_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp25 * tmp1 tmp27 = tmp3 + tmp26 tmp28 = tmp2 - tmp27 tmp29 = 0.0 tmp30 = triton_helpers.maximum(tmp28, tmp29) tl.store(out_ptr0 + x2, tmp30, xmask) @triton.jit def triton_poi_fused_add_div_where_12(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + x0, xmask) tmp19 = tl.load(in_out_ptr0 + x0, xmask) tmp20 = tl.load(in_ptr2 + x0, xmask) tmp1 = 2.0 tmp2 = libdevice.pow(tmp0, tmp1) tmp4 = libdevice.pow(tmp3, tmp1) tmp5 = tmp2 + tmp4 tmp7 = libdevice.pow(tmp6, tmp1) tmp8 = tmp5 + tmp7 tmp10 = libdevice.pow(tmp9, tmp1) tmp11 = tmp8 + tmp10 tmp12 = 1.0 tmp13 = tmp11 - tmp12 tmp15 = tmp14 - tmp12 tmp16 = tmp13 * tmp15 tmp17 = 0.0 tmp18 = 
tmp16 >= tmp17 tmp21 = 0.5 tmp22 = tmp20 * tmp21 tmp23 = tmp22 * tmp21 tmp24 = tmp23 * tmp21 tmp25 = tmp24 * tmp21 tmp26 = tmp25 * tmp21 tmp27 = tmp26 * tmp21 tmp28 = tmp27 * tmp21 tmp29 = tmp28 * tmp21 tmp30 = tmp29 * tmp21 tmp31 = tmp30 * tmp21 tmp32 = tmp31 * tmp21 tmp33 = tmp32 * tmp21 tmp34 = tmp33 * tmp21 tmp35 = tmp34 * tmp21 tmp36 = tmp35 * tmp21 tmp37 = tmp36 * tmp21 tmp38 = tmp37 * tmp21 tmp39 = tmp38 * tmp21 tmp40 = tmp39 * tmp21 tmp41 = tmp40 * tmp21 tmp42 = tmp41 * tmp21 tmp43 = tmp42 * tmp21 tmp44 = tmp19 + tmp43 tmp45 = tl.where(tmp18, tmp44, tmp19) tl.store(in_out_ptr0 + x0, tmp45, xmask) @triton.jit def triton_poi_fused_add_clamp_div_mul_sub_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp25 * tmp1 tmp27 = tmp26 * tmp1 tmp28 = tmp3 + tmp27 tmp29 = tmp2 - tmp28 tmp30 = 0.0 tmp31 = triton_helpers.maximum(tmp29, tmp30) tl.store(out_ptr0 + x2, tmp31, xmask) @triton.jit def triton_poi_fused_add_div_where_14(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + x0, xmask) tmp19 = tl.load(in_out_ptr0 + x0, xmask) tmp20 = tl.load(in_ptr2 + x0, xmask) tmp1 = 2.0 tmp2 = libdevice.pow(tmp0, tmp1) tmp4 = libdevice.pow(tmp3, tmp1) tmp5 = tmp2 + tmp4 tmp7 = libdevice.pow(tmp6, tmp1) tmp8 = tmp5 + tmp7 tmp10 = libdevice.pow(tmp9, tmp1) tmp11 = tmp8 + tmp10 tmp12 = 1.0 tmp13 = tmp11 - tmp12 tmp15 = tmp14 - tmp12 tmp16 = tmp13 * tmp15 tmp17 = 0.0 tmp18 = tmp16 >= tmp17 tmp21 = 0.5 tmp22 = tmp20 * tmp21 tmp23 = tmp22 * tmp21 tmp24 = tmp23 * tmp21 tmp25 = tmp24 * tmp21 tmp26 = tmp25 * tmp21 tmp27 = tmp26 * tmp21 tmp28 = tmp27 * tmp21 tmp29 = tmp28 * tmp21 tmp30 = tmp29 * tmp21 tmp31 = tmp30 * tmp21 tmp32 = tmp31 * tmp21 tmp33 = tmp32 * tmp21 tmp34 = tmp33 * tmp21 tmp35 = tmp34 * tmp21 tmp36 = tmp35 * tmp21 tmp37 = tmp36 * tmp21 tmp38 = tmp37 * tmp21 tmp39 = tmp38 * tmp21 tmp40 = tmp39 * tmp21 tmp41 = tmp40 * tmp21 tmp42 = tmp41 * tmp21 tmp43 = tmp42 * tmp21 tmp44 = tmp43 * tmp21 tmp45 = tmp19 + tmp44 tmp46 = tl.where(tmp18, tmp45, tmp19) tl.store(in_out_ptr0 + x0, tmp46, xmask) @triton.jit def triton_poi_fused_add_div_mul_sub_15(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 
tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp25 * tmp1 tmp27 = tmp26 * tmp1 tmp28 = tmp27 * tmp1 tmp29 = tmp3 + tmp28 tmp30 = tmp2 - tmp29 tl.store(out_ptr0 + x2, tmp30, xmask) @triton.jit def triton_poi_fused_add_div_where_16(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + x0, xmask) tmp23 = tl.load(in_out_ptr0 + x0, xmask) tmp24 = tl.load(in_ptr2 + x0, xmask) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 2.0 tmp4 = libdevice.pow(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp5, tmp1) tmp7 = libdevice.pow(tmp6, tmp3) tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp9, tmp1) tmp11 = libdevice.pow(tmp10, tmp3) tmp12 = tmp8 + tmp11 tmp14 = triton_helpers.maximum(tmp13, tmp1) tmp15 = libdevice.pow(tmp14, tmp3) tmp16 = tmp12 + tmp15 tmp17 = 1.0 tmp18 = tmp16 - tmp17 tmp20 = tmp19 - tmp17 tmp21 = tmp18 * tmp20 tmp22 = tmp21 >= tmp1 tmp25 = 0.5 tmp26 = tmp24 * tmp25 tmp27 = tmp26 * tmp25 tmp28 = tmp27 * tmp25 tmp29 = tmp28 * tmp25 tmp30 = tmp29 * tmp25 tmp31 = tmp30 * tmp25 tmp32 = tmp31 * tmp25 tmp33 = tmp32 * tmp25 tmp34 = tmp33 * tmp25 tmp35 = tmp34 * tmp25 tmp36 = tmp35 * tmp25 tmp37 = tmp36 * tmp25 tmp38 = tmp37 * tmp25 tmp39 = tmp38 * tmp25 tmp40 = tmp39 * tmp25 tmp41 = tmp40 * tmp25 tmp42 = tmp41 * tmp25 tmp43 = tmp42 * tmp25 tmp44 = tmp43 * tmp25 tmp45 = tmp44 * tmp25 tmp46 = tmp45 * tmp25 tmp47 = tmp46 * tmp25 tmp48 = tmp47 * tmp25 tmp49 = tmp48 * tmp25 tmp50 = tmp23 + tmp49 tmp51 = tl.where(tmp22, tmp50, tmp23) tl.store(in_out_ptr0 + x0, tmp51, xmask) @triton.jit def triton_poi_fused_add_div_mul_sub_17(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp25 * tmp1 tmp27 = tmp26 * tmp1 tmp28 = tmp27 * tmp1 tmp29 = tmp28 * tmp1 tmp30 = 
tmp3 + tmp29 tmp31 = tmp2 - tmp30 tl.store(out_ptr0 + x2, tmp31, xmask) @triton.jit def triton_poi_fused_add_div_where_18(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + x0, xmask) tmp23 = tl.load(in_out_ptr0 + x0, xmask) tmp24 = tl.load(in_ptr2 + x0, xmask) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 2.0 tmp4 = libdevice.pow(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp5, tmp1) tmp7 = libdevice.pow(tmp6, tmp3) tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp9, tmp1) tmp11 = libdevice.pow(tmp10, tmp3) tmp12 = tmp8 + tmp11 tmp14 = triton_helpers.maximum(tmp13, tmp1) tmp15 = libdevice.pow(tmp14, tmp3) tmp16 = tmp12 + tmp15 tmp17 = 1.0 tmp18 = tmp16 - tmp17 tmp20 = tmp19 - tmp17 tmp21 = tmp18 * tmp20 tmp22 = tmp21 >= tmp1 tmp25 = 0.5 tmp26 = tmp24 * tmp25 tmp27 = tmp26 * tmp25 tmp28 = tmp27 * tmp25 tmp29 = tmp28 * tmp25 tmp30 = tmp29 * tmp25 tmp31 = tmp30 * tmp25 tmp32 = tmp31 * tmp25 tmp33 = tmp32 * tmp25 tmp34 = tmp33 * tmp25 tmp35 = tmp34 * tmp25 tmp36 = tmp35 * tmp25 tmp37 = tmp36 * tmp25 tmp38 = tmp37 * tmp25 tmp39 = tmp38 * tmp25 tmp40 = tmp39 * tmp25 tmp41 = tmp40 * tmp25 tmp42 = tmp41 * tmp25 tmp43 = tmp42 * tmp25 tmp44 = tmp43 * tmp25 tmp45 = tmp44 * tmp25 tmp46 = tmp45 * tmp25 tmp47 = tmp46 * tmp25 tmp48 = tmp47 * tmp25 tmp49 = tmp48 * tmp25 tmp50 = tmp49 * tmp25 tmp51 = tmp23 + tmp50 tmp52 = tl.where(tmp22, tmp51, tmp23) tl.store(in_out_ptr0 + x0, tmp52, xmask) @triton.jit def triton_poi_fused_add_div_mul_sub_19(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp5 = tmp4 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = tmp6 * tmp1 tmp8 = tmp7 * tmp1 tmp9 = tmp8 * tmp1 tmp10 = tmp9 * tmp1 tmp11 = tmp10 * tmp1 tmp12 = tmp11 * tmp1 tmp13 = tmp12 * tmp1 tmp14 = tmp13 * tmp1 tmp15 = tmp14 * tmp1 tmp16 = tmp15 * tmp1 tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp1 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tmp21 = tmp20 * tmp1 tmp22 = tmp21 * tmp1 tmp23 = tmp22 * tmp1 tmp24 = tmp23 * tmp1 tmp25 = tmp24 * tmp1 tmp26 = tmp25 * tmp1 tmp27 = tmp26 * tmp1 tmp28 = tmp27 * tmp1 tmp29 = tmp28 * tmp1 tmp30 = tmp29 * tmp1 tmp31 = tmp3 + tmp30 tmp32 = tmp2 - tmp31 tl.store(out_ptr0 + x2, tmp32, xmask) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20(in_out_ptr0, in_out_ptr4, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr7, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, 
eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + x0, xmask) tmp23 = tl.load(in_out_ptr0 + x0, xmask) tmp24 = tl.load(in_ptr2 + x0, xmask) tmp56 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last') tmp61 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp67 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp73 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 2.0 tmp4 = libdevice.pow(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp5, tmp1) tmp7 = libdevice.pow(tmp6, tmp3) tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp9, tmp1) tmp11 = libdevice.pow(tmp10, tmp3) tmp12 = tmp8 + tmp11 tmp14 = triton_helpers.maximum(tmp13, tmp1) tmp15 = libdevice.pow(tmp14, tmp3) tmp16 = tmp12 + tmp15 tmp17 = 1.0 tmp18 = tmp16 - tmp17 tmp20 = tmp19 - tmp17 tmp21 = tmp18 * tmp20 tmp22 = tmp21 >= tmp1 tmp25 = 0.5 tmp26 = tmp24 * tmp25 tmp27 = tmp26 * tmp25 tmp28 = tmp27 * tmp25 tmp29 = tmp28 * tmp25 tmp30 = tmp29 * tmp25 tmp31 = tmp30 * tmp25 tmp32 = tmp31 * tmp25 tmp33 = tmp32 * tmp25 tmp34 = tmp33 * tmp25 tmp35 = tmp34 * tmp25 tmp36 = tmp35 * tmp25 tmp37 = tmp36 * tmp25 tmp38 = tmp37 * tmp25 tmp39 = tmp38 * tmp25 tmp40 = tmp39 * tmp25 tmp41 = tmp40 * tmp25 tmp42 = tmp41 * tmp25 tmp43 = tmp42 * tmp25 tmp44 = tmp43 * tmp25 tmp45 = tmp44 * tmp25 tmp46 = tmp45 * tmp25 tmp47 = tmp46 * tmp25 tmp48 = tmp47 * tmp25 tmp49 = tmp48 * tmp25 tmp50 = tmp49 * tmp25 tmp51 = tmp50 * tmp25 tmp52 = tmp23 + tmp51 tmp53 = tl.where(tmp22, tmp52, tmp23) tmp54 = tmp51 * tmp25 tmp55 = tmp53 + tmp54 tmp57 = tmp56 * tmp25 tmp58 = tmp57 - tmp55 tmp59 = triton_helpers.maximum(tmp58, tmp1) tmp60 = libdevice.pow(tmp59, tmp3) tmp62 = tmp61 * tmp25 tmp63 = tmp62 - tmp55 tmp64 = triton_helpers.maximum(tmp63, tmp1) tmp65 = libdevice.pow(tmp64, tmp3) tmp66 = tmp60 + tmp65 tmp68 = tmp67 * tmp25 tmp69 = tmp68 - tmp55 tmp70 = triton_helpers.maximum(tmp69, tmp1) tmp71 = libdevice.pow(tmp70, tmp3) tmp72 = tmp66 + tmp71 tmp74 = tmp73 * tmp25 tmp75 = tmp74 - tmp55 tmp76 = triton_helpers.maximum(tmp75, tmp1) tmp77 = libdevice.pow(tmp76, tmp3) tmp78 = tmp72 + tmp77 tmp79 = tmp78 - tmp17 tmp80 = tmp79 * tmp20 tmp81 = tmp80 >= tmp1 tmp82 = tl.where(tmp81, tmp55, tmp53) tmp83 = tmp54 * tmp25 tmp84 = tmp82 + tmp83 tmp85 = tmp57 - tmp84 tmp86 = triton_helpers.maximum(tmp85, tmp1) tmp87 = libdevice.pow(tmp86, tmp3) tmp88 = tmp62 - tmp84 tmp89 = triton_helpers.maximum(tmp88, tmp1) tmp90 = libdevice.pow(tmp89, tmp3) tmp91 = tmp87 + tmp90 tmp92 = tmp68 - tmp84 tmp93 = triton_helpers.maximum(tmp92, tmp1) tmp94 = libdevice.pow(tmp93, tmp3) tmp95 = tmp91 + tmp94 tmp96 = tmp74 - tmp84 tmp97 = triton_helpers.maximum(tmp96, tmp1) tmp98 = libdevice.pow(tmp97, tmp3) tmp99 = tmp95 + tmp98 tmp100 = tmp99 - tmp17 tmp101 = tmp100 * tmp20 tmp102 = tmp101 >= tmp1 tmp103 = tl.where(tmp102, tmp84, tmp82) tmp104 = tmp83 * tmp25 tmp105 = tmp103 + tmp104 tmp106 = tmp57 - tmp105 tmp107 = triton_helpers.maximum(tmp106, tmp1) tmp108 = libdevice.pow(tmp107, tmp3) tmp109 = tmp62 - tmp105 tmp110 = triton_helpers.maximum(tmp109, tmp1) tmp111 = libdevice.pow(tmp110, tmp3) tmp112 = tmp108 + tmp111 tmp113 = tmp68 - tmp105 tmp114 = triton_helpers.maximum(tmp113, tmp1) tmp115 = libdevice.pow(tmp114, tmp3) tmp116 = tmp112 + tmp115 tmp117 = tmp74 - tmp105 tmp118 = triton_helpers.maximum(tmp117, tmp1) tmp119 = libdevice.pow(tmp118, tmp3) tmp120 = tmp116 + tmp119 tmp121 = tmp120 - tmp17 tmp122 = tmp121 * tmp20 tmp123 = tmp122 >= tmp1 tmp124 = 
tl.where(tmp123, tmp105, tmp103) tmp125 = tmp104 * tmp25 tmp126 = tmp124 + tmp125 tmp127 = tmp57 - tmp126 tmp128 = triton_helpers.maximum(tmp127, tmp1) tmp129 = libdevice.pow(tmp128, tmp3) tmp130 = tmp62 - tmp126 tmp131 = triton_helpers.maximum(tmp130, tmp1) tmp132 = libdevice.pow(tmp131, tmp3) tmp133 = tmp129 + tmp132 tmp134 = tmp68 - tmp126 tmp135 = triton_helpers.maximum(tmp134, tmp1) tmp136 = libdevice.pow(tmp135, tmp3) tmp137 = tmp133 + tmp136 tmp138 = tmp74 - tmp126 tmp139 = triton_helpers.maximum(tmp138, tmp1) tmp140 = libdevice.pow(tmp139, tmp3) tmp141 = tmp137 + tmp140 tmp142 = tmp141 - tmp17 tmp143 = tmp142 * tmp20 tmp144 = tmp143 >= tmp1 tmp145 = tl.where(tmp144, tmp126, tmp124) tmp146 = tmp125 * tmp25 tmp147 = tmp145 + tmp146 tmp148 = tmp57 - tmp147 tmp149 = triton_helpers.maximum(tmp148, tmp1) tmp150 = libdevice.pow(tmp149, tmp3) tmp151 = tmp62 - tmp147 tmp152 = triton_helpers.maximum(tmp151, tmp1) tmp153 = libdevice.pow(tmp152, tmp3) tmp154 = tmp150 + tmp153 tmp155 = tmp68 - tmp147 tmp156 = triton_helpers.maximum(tmp155, tmp1) tmp157 = libdevice.pow(tmp156, tmp3) tmp158 = tmp154 + tmp157 tmp159 = tmp74 - tmp147 tmp160 = triton_helpers.maximum(tmp159, tmp1) tmp161 = libdevice.pow(tmp160, tmp3) tmp162 = tmp158 + tmp161 tmp163 = tmp162 - tmp17 tmp164 = tmp163 * tmp20 tmp165 = tmp164 >= tmp1 tmp166 = tl.where(tmp165, tmp147, tmp145) tmp167 = tmp146 * tmp25 tmp168 = tmp166 + tmp167 tmp169 = tmp57 - tmp168 tmp170 = triton_helpers.maximum(tmp169, tmp1) tmp171 = libdevice.pow(tmp170, tmp3) tmp172 = tmp62 - tmp168 tmp173 = triton_helpers.maximum(tmp172, tmp1) tmp174 = libdevice.pow(tmp173, tmp3) tmp175 = tmp171 + tmp174 tmp176 = tmp68 - tmp168 tmp177 = triton_helpers.maximum(tmp176, tmp1) tmp178 = libdevice.pow(tmp177, tmp3) tmp179 = tmp175 + tmp178 tmp180 = tmp74 - tmp168 tmp181 = triton_helpers.maximum(tmp180, tmp1) tmp182 = libdevice.pow(tmp181, tmp3) tmp183 = tmp179 + tmp182 tmp184 = tmp183 - tmp17 tmp185 = tmp184 * tmp20 tmp186 = tmp185 >= tmp1 tmp187 = tl.where(tmp186, tmp168, tmp166) tmp188 = tmp167 * tmp25 tmp189 = tmp187 + tmp188 tmp190 = tmp57 - tmp189 tmp191 = triton_helpers.maximum(tmp190, tmp1) tmp192 = libdevice.pow(tmp191, tmp3) tmp193 = tmp62 - tmp189 tmp194 = triton_helpers.maximum(tmp193, tmp1) tmp195 = libdevice.pow(tmp194, tmp3) tmp196 = tmp192 + tmp195 tmp197 = tmp68 - tmp189 tmp198 = triton_helpers.maximum(tmp197, tmp1) tmp199 = libdevice.pow(tmp198, tmp3) tmp200 = tmp196 + tmp199 tmp201 = tmp74 - tmp189 tmp202 = triton_helpers.maximum(tmp201, tmp1) tmp203 = libdevice.pow(tmp202, tmp3) tmp204 = tmp200 + tmp203 tmp205 = tmp204 - tmp17 tmp206 = tmp205 * tmp20 tmp207 = tmp206 >= tmp1 tmp208 = tl.where(tmp207, tmp189, tmp187) tmp209 = tmp188 * tmp25 tmp210 = tmp208 + tmp209 tmp211 = tmp57 - tmp210 tmp212 = triton_helpers.maximum(tmp211, tmp1) tmp213 = libdevice.pow(tmp212, tmp3) tmp214 = tmp62 - tmp210 tmp215 = triton_helpers.maximum(tmp214, tmp1) tmp216 = libdevice.pow(tmp215, tmp3) tmp217 = tmp213 + tmp216 tmp218 = tmp68 - tmp210 tmp219 = triton_helpers.maximum(tmp218, tmp1) tmp220 = libdevice.pow(tmp219, tmp3) tmp221 = tmp217 + tmp220 tmp222 = tmp74 - tmp210 tmp223 = triton_helpers.maximum(tmp222, tmp1) tmp224 = libdevice.pow(tmp223, tmp3) tmp225 = tmp221 + tmp224 tmp226 = tmp225 - tmp17 tmp227 = tmp226 * tmp20 tmp228 = tmp227 >= tmp1 tmp229 = tl.where(tmp228, tmp210, tmp208) tmp230 = tmp209 * tmp25 tmp231 = tmp229 + tmp230 tmp232 = tmp57 - tmp231 tmp233 = triton_helpers.maximum(tmp232, tmp1) tmp234 = libdevice.pow(tmp233, tmp3) tmp235 = tmp62 - tmp231 tmp236 = 
triton_helpers.maximum(tmp235, tmp1) tmp237 = libdevice.pow(tmp236, tmp3) tmp238 = tmp234 + tmp237 tmp239 = tmp68 - tmp231 tmp240 = triton_helpers.maximum(tmp239, tmp1) tmp241 = libdevice.pow(tmp240, tmp3) tmp242 = tmp238 + tmp241 tmp243 = tmp74 - tmp231 tmp244 = triton_helpers.maximum(tmp243, tmp1) tmp245 = libdevice.pow(tmp244, tmp3) tmp246 = tmp242 + tmp245 tl.store(in_out_ptr4 + x0, tmp231, xmask) tl.store(out_ptr7 + x0, tmp246, xmask) @triton.jit def triton_poi_fused_add_clamp_div_mul_pow_sub_where_21(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 - tmp3 tmp5 = 0.0 tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = 2.0 tmp8 = libdevice.pow(tmp6, tmp7) tmp10 = tmp8 / tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf35 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf51 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf52 = reinterpret_tensor(buf51, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf51 buf53 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0[grid(64)]( buf52, arg0_1, buf1, buf35, buf53, 64, XBLOCK=64, num_warps=1, num_stages=1) buf54 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_clamp_div_mul_pow_sub_where_1[grid(256)](arg0_1, buf53, buf1, buf52, buf35, buf54, 256, XBLOCK=256, num_warps=4, num_stages=1) buf55 = buf52 del buf52 buf56 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2[grid(64)](buf55, buf54, buf1, buf53, buf35, arg0_1, buf56, 64, XBLOCK=64, num_warps=1, num_stages=1) buf57 = buf54 del buf54 triton_poi_fused_add_div_mul_sub_where_3[grid(256)](arg0_1, buf56, buf1, buf55, buf35, buf57, 256, XBLOCK=128, num_warps=4, num_stages=1) buf58 = buf55 del buf55 buf59 = buf53 del buf53 triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4[grid(64)](buf58, buf57, buf1, buf56, buf35, arg0_1, buf59, 64, XBLOCK=64, num_warps=1, num_stages=1) buf60 = buf57 del buf57 triton_poi_fused_add_div_mul_sub_where_5[grid(256)](arg0_1, buf59, buf1, buf58, buf35, buf60, 256, XBLOCK=128, num_warps=4, num_stages=1) buf61 = buf58 del buf58 buf66 = buf56 del buf56 buf67 = reinterpret_tensor(buf66, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf66 triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6[grid(64)](buf61, buf67, buf60, buf1, buf59, buf35, arg0_1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf68 = buf60 del buf60 triton_poi_fused_add_clamp_div_mul_pow_sub_7[grid(256)](arg0_1, buf67, buf35, buf68, 256, XBLOCK=256, num_warps=4, num_stages=1) buf69 = buf67 del buf67 triton_poi_fused_add_div_where_8[grid(64)](buf69, buf68, buf1, buf35, 64, XBLOCK=64, num_warps=1, num_stages=1) buf70 = buf68 del buf68 triton_poi_fused_add_clamp_div_mul_pow_sub_9[grid(256)](arg0_1, buf69, buf35, buf70, 256, XBLOCK=256, num_warps=4, num_stages=1) buf71 = buf69 del buf69 triton_poi_fused_add_div_where_10[grid(64)](buf71, 
            buf70, buf1, buf35, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf72 = buf70
        del buf70
        triton_poi_fused_add_clamp_div_mul_sub_11[grid(256)](arg0_1, buf71,
            buf35, buf72, 256, XBLOCK=256, num_warps=4, num_stages=1)
        buf73 = buf71
        del buf71
        triton_poi_fused_add_div_where_12[grid(64)](buf73, buf72, buf1,
            buf35, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf74 = buf72
        del buf72
        triton_poi_fused_add_clamp_div_mul_sub_13[grid(256)](arg0_1, buf73,
            buf35, buf74, 256, XBLOCK=256, num_warps=4, num_stages=1)
        buf75 = buf73
        del buf73
        triton_poi_fused_add_div_where_14[grid(64)](buf75, buf74, buf1,
            buf35, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf76 = buf74
        del buf74
        triton_poi_fused_add_div_mul_sub_15[grid(256)](arg0_1, buf75,
            buf35, buf76, 256, XBLOCK=256, num_warps=4, num_stages=1)
        buf77 = buf75
        del buf75
        triton_poi_fused_add_div_where_16[grid(64)](buf77, buf76, buf1,
            buf35, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf78 = buf76
        del buf76
        triton_poi_fused_add_div_mul_sub_17[grid(256)](arg0_1, buf77,
            buf35, buf78, 256, XBLOCK=256, num_warps=4, num_stages=1)
        buf79 = buf77
        del buf77
        triton_poi_fused_add_div_where_18[grid(64)](buf79, buf78, buf1,
            buf35, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf80 = buf78
        del buf78
        triton_poi_fused_add_div_mul_sub_19[grid(256)](arg0_1, buf79,
            buf35, buf80, 256, XBLOCK=256, num_warps=4, num_stages=1)
        buf81 = buf79
        del buf79
        buf95 = reinterpret_tensor(buf61, (4, 4, 4), (16, 4, 1), 0)
        del buf61
        buf96 = reinterpret_tensor(buf95, (4, 4, 4, 1), (16, 4, 1, 64), 0)
        del buf95
        buf97 = buf59
        del buf59
        triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20[grid(64)](
            buf81, buf96, buf80, buf1, buf35, arg0_1, buf97, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf1
        del buf35
        del buf81
        buf98 = buf80
        del buf80
        triton_poi_fused_add_clamp_div_mul_pow_sub_where_21[grid(256)](
            arg0_1, buf96, buf97, buf98, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del arg0_1
        del buf96
        del buf97
    return buf98,


def entmax_bisect(X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True):
    """alpha-entmax: normalizing sparse transform (a la softmax).

    Solves the optimization problem:

        max_p <x, p> - H_a(p)    s.t.    p >= 0, sum(p) == 1.

    where H_a(p) is the Tsallis alpha-entropy with custom alpha >= 1,
    using a bisection (root finding, binary search) algorithm.

    This function is differentiable with respect to both X and alpha.

    Parameters
    ----------
    X : torch.Tensor
        The input tensor.

    alpha : float or torch.Tensor
        Tensor of alpha parameters (> 1) to use. If scalar or python float,
        the same value is used for all rows, otherwise, it must have shape
        (or be expandable to)
        alpha.shape[j] == (X.shape[j] if j != dim else 1)
        A value of alpha=2 corresponds to sparsemax, and alpha=1 corresponds
        to softmax (but computing it this way is likely unstable).

    dim : int
        The dimension along which to apply alpha-entmax.

    n_iter : int
        Number of bisection iterations. For float32, 24 iterations should
        suffice for machine precision.

    ensure_sum_one : bool,
        Whether to divide the result by its sum. If false, the result might
        sum to close but not exactly 1, which might cause downstream problems.

    Returns
    -------
    P : torch tensor, same shape as X
        The projection result, such that P.sum(dim=dim) == 1 elementwise.
    """
    return EntmaxBisectFunction.apply(X, alpha, dim, n_iter, ensure_sum_one)


class EntmaxBisectFunction(Function):

    @classmethod
    def _gp(cls, x, alpha):
        return x ** (alpha - 1)

    @classmethod
    def _gp_inv(cls, y, alpha):
        return y ** (1 / (alpha - 1))

    @classmethod
    def _p(cls, X, alpha):
        return cls._gp_inv(torch.clamp(X, min=0), alpha)

    @classmethod
    def forward(cls, ctx, X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True):
        if not isinstance(alpha, torch.Tensor):
            alpha = torch.tensor(alpha, dtype=X.dtype, device=X.device)
        alpha_shape = list(X.shape)
        alpha_shape[dim] = 1
        alpha = alpha.expand(*alpha_shape)
        ctx.alpha = alpha
        ctx.dim = dim
        d = X.shape[dim]
        X = X * (alpha - 1)
        max_val, _ = X.max(dim=dim, keepdim=True)
        tau_lo = max_val - cls._gp(1, alpha)
        tau_hi = max_val - cls._gp(1 / d, alpha)
        f_lo = cls._p(X - tau_lo, alpha).sum(dim) - 1
        dm = tau_hi - tau_lo
        for it in range(n_iter):
            dm /= 2
            tau_m = tau_lo + dm
            p_m = cls._p(X - tau_m, alpha)
            f_m = p_m.sum(dim) - 1
            mask = (f_m * f_lo >= 0).unsqueeze(dim)
            tau_lo = torch.where(mask, tau_m, tau_lo)
        if ensure_sum_one:
            p_m /= p_m.sum(dim=dim).unsqueeze(dim=dim)
        ctx.save_for_backward(p_m)
        return p_m

    @classmethod
    def backward(cls, ctx, dY):
        Y, = ctx.saved_tensors
        gppr = torch.where(Y > 0, Y ** (2 - ctx.alpha), Y.new_zeros(1))
        dX = dY * gppr
        q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
        q = q.unsqueeze(ctx.dim)
        dX -= q * gppr
        d_alpha = None
        if ctx.needs_input_grad[1]:
            S = torch.where(Y > 0, Y * torch.log(Y), Y.new_zeros(1))
            ent = S.sum(ctx.dim).unsqueeze(ctx.dim)
            Y_skewed = gppr / gppr.sum(ctx.dim).unsqueeze(ctx.dim)
            d_alpha = dY * (Y - Y_skewed) / (ctx.alpha - 1) ** 2
            d_alpha -= dY * (S - Y_skewed * ent) / (ctx.alpha - 1)
            d_alpha = d_alpha.sum(ctx.dim).unsqueeze(ctx.dim)
        return dX, d_alpha, None, None, None


class EntmaxBisectNew(nn.Module):

    def __init__(self, alpha=1.5, dim=-1, n_iter=50):
        """alpha-entmax: normalizing sparse map (a la softmax) via bisection.

        Solves the optimization problem:

            max_p <x, p> - H_a(p)    s.t.    p >= 0, sum(p) == 1.

        where H_a(p) is the Tsallis alpha-entropy with custom alpha >= 1,
        using a bisection (root finding, binary search) algorithm.

        Parameters
        ----------
        alpha : float or torch.Tensor
            Tensor of alpha parameters (> 1) to use. If scalar or python
            float, the same value is used for all rows, otherwise, it must
            have shape (or be expandable to)
            alpha.shape[j] == (X.shape[j] if j != dim else 1)
            A value of alpha=2 corresponds to sparsemax; alpha=1 corresponds
            to softmax (but computing it this way is likely unstable).

        dim : int
            The dimension along which to apply alpha-entmax.

        n_iter : int
            Number of bisection iterations. For float32, 24 iterations should
            suffice for machine precision.
        """
        self.dim = dim
        self.n_iter = n_iter
        self.alpha = alpha
        super().__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
antoniogois/entmax
EntmaxBisect
false
15108
[ "MIT" ]
298
7ff3fa6b09ee53e04514173aacae9de90c95ca75
https://github.com/antoniogois/entmax/tree/7ff3fa6b09ee53e04514173aacae9de90c95ca75
decoder4
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/5d/c5dw65h6nafj4s44sgiahjrq6lb3zgwonovnkpx75jkkuxpl34xg.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # out => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 73728 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 6 x1 = (xindex // 6) % 6 x2 = (xindex // 36) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), None, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kk/ckkeehad7xvjmxduxmwzjdm4zu3f5inttmd6kj4pju55xhwrnoea.py # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] # Source node to ATen node mapping: # out_3 => add, add_1, convert_element_type, convert_element_type_1, iota_2, mul, mul_1 # Graph fragment: # %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_2, 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.5), kwargs = {}) # %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_1, torch.int64), kwargs = {}) triton_poi_fused__to_copy_add_arange_mul_1 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/md/cmdntih7abyn6mwxuftxv3u7gvdavkba4iadd7mlu5mbghjqqv56.py # Topologically Sorted Source Nodes: [out_1, out_2, out_3, out_4], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d] # Source node to ATen node mapping: # out_1 => convolution # out_2 => relu # out_3 => _unsafe_index_2 # out_4 => _unsafe_index_3, _unsafe_index_4 # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) # %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, %sub_5, None]), kwargs = {}) # %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_3, [None, None, None, %sub_5]), kwargs = {}) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 102400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 10) % 10 x0 = xindex % 10 x4 = (xindex // 100) x2 = (xindex // 100) % 256 x7 = xindex tmp0 = tl.load(in_ptr0 + (7 + ((-1)*(tl_math.abs((-7) + (tl_math.abs((-1) + x1)))))), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (7 + ((-1)*(tl_math.abs((-7) + (tl_math.abs((-1) + x0)))))), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x2), None, eviction_policy='evict_last') tmp1 = 
tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + (4*tmp4) + (16*x4)), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x7), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7v/c7v45oloqfn4euhmt66oczbasvb63gxwy7fm3svojah6q6q2th7g.py # Topologically Sorted Source Nodes: [out_5, out_6, out_7], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] # Source node to ATen node mapping: # out_5 => convolution_1 # out_6 => relu_1 # out_7 => _unsafe_index_5, _unsafe_index_6 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_4, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) # %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_1, [None, None, %sub_5, None]), kwargs = {}) # %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_5, [None, None, None, %sub_5]), kwargs = {}) triton_poi_fused_convolution_reflection_pad2d_relu_3 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 102400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 10 x1 = (xindex // 10) % 10 x4 = (xindex // 100) x2 = (xindex // 100) % 256 x5 = xindex tmp0 = tl.load(in_ptr0 + (63 + ((-1)*(tl_math.abs((-7) + (tl_math.abs((-1) + x0))))) + ((-8)*(tl_math.abs((-7) + (tl_math.abs((-1) + x1))))) + (64*x4)), None, eviction_policy='evict_last') tmp1 = 
tl.load(in_ptr1 + (x2), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x5), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7n/c7nyyvchtptp5rbqvks7cavg63gajcswx5tpkvypdmc7ocd2zaaz.py # Topologically Sorted Source Nodes: [out_16], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] # Source node to ATen node mapping: # out_16 => add_4, add_5, convert_element_type_4, convert_element_type_5, iota_12, mul_4, mul_5 # Graph fragment: # %iota_12 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_12, 1), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 0), kwargs = {}) # %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_4, torch.float32), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, 0.5), kwargs = {}) # %convert_element_type_5 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_5, torch.int64), kwargs = {}) triton_poi_fused__to_copy_add_arange_mul_4 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/qf/cqflzwnnhiyl7hoawchcxkqxkkwckfdq3rpeoa3o4sk6wo5a6n6n.py # Topologically Sorted Source Nodes: 
[out_14, out_15, out_16, out_17], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d] # Source node to ATen node mapping: # out_14 => convolution_4 # out_15 => relu_4 # out_16 => _unsafe_index_11 # out_17 => _unsafe_index_12, _unsafe_index_13 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_10, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) # %_unsafe_index_11 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_4, [None, None, %unsqueeze_1, %convert_element_type_5]), kwargs = {}) # %_unsafe_index_12 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_11, [None, None, %sub_21, None]), kwargs = {}) # %_unsafe_index_13 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_12, [None, None, None, %sub_21]), kwargs = {}) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 165888 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 18) % 18 x0 = xindex % 18 x4 = (xindex // 324) x2 = (xindex // 324) % 128 x7 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-1) + x1)))))), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-1) + x0)))))), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x2), None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 8, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) 
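    # tmp0/tmp5 are nearest-neighbor source coordinates read from the precomputed
    # index tensor through the reflection-pad arithmetic; the wrap above adds the
    # source extent (8) to any negative index so the fused gather below stays in
    # bounds, matching aten._unsafe_index semantics.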
tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + (8*tmp4) + (64*x4)), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x7), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/i4/ci4u2uzwdl4x6xsvbrs62puexrtawx4kfqztnsboljgpqpev5zur.py # Topologically Sorted Source Nodes: [out_18, out_19, out_20], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] # Source node to ATen node mapping: # out_18 => convolution_5 # out_19 => relu_5 # out_20 => _unsafe_index_14, _unsafe_index_15 # Graph fragment: # %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_13, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {}) # %_unsafe_index_14 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_5, [None, None, %sub_21, None]), kwargs = {}) # %_unsafe_index_15 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_14, [None, None, None, %sub_21]), kwargs = {}) triton_poi_fused_convolution_reflection_pad2d_relu_6 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 165888 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 18 x1 = (xindex // 18) % 18 x4 = (xindex // 324) x2 = (xindex // 324) % 128 x5 = xindex tmp0 = tl.load(in_ptr0 + (255 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-1) + x0))))) + ((-16)*(tl_math.abs((-15) + (tl_math.abs((-1) + x1))))) + (256*x4)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 
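    # tmp0 is the conv output gathered through the inlined 18x18 reflection pad
    # and tmp1 the per-channel bias; the full/maximum pair below applies the
    # fused ReLU, i.e. max(conv + bias, 0), before the store.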
tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x5), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/uw/cuwhadke7ul24n6hzb5sal45fu4ro7spo5s7tl5x4bo5jl7gz66f.py # Topologically Sorted Source Nodes: [out_23], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] # Source node to ATen node mapping: # out_23 => add_8, add_9, convert_element_type_8, convert_element_type_9, iota_18, mul_8, mul_9 # Graph fragment: # %iota_18 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (32,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_18, 1), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_8, 0), kwargs = {}) # %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_8, torch.float32), kwargs = {}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_8, 0.0), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_9, 0.5), kwargs = {}) # %convert_element_type_9 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_9, torch.int64), kwargs = {}) triton_poi_fused__to_copy_add_arange_mul_7 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_7(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/k7/ck7bf3l3rzn2b5z27scfb6po6ttassqspu4urfkn5e7xbhsq2s2d.py # Topologically Sorted Source Nodes: [out_21, out_22, out_23, out_24], Original ATen: [aten.convolution, aten.relu, 
aten._unsafe_index, aten.reflection_pad2d] # Source node to ATen node mapping: # out_21 => convolution_6 # out_22 => relu_6 # out_23 => _unsafe_index_16 # out_24 => _unsafe_index_17, _unsafe_index_18 # Graph fragment: # %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_15, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {}) # %_unsafe_index_16 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_6, [None, None, %unsqueeze_2, %convert_element_type_9]), kwargs = {}) # %_unsafe_index_17 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_16, [None, None, %sub_29, None]), kwargs = {}) # %_unsafe_index_18 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_17, [None, None, None, %sub_29]), kwargs = {}) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 295936 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 34) % 34 x0 = xindex % 34 x4 = (xindex // 1156) x2 = (xindex // 1156) % 64 x7 = xindex tmp0 = tl.load(in_ptr0 + (31 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1)))))), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (31 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x0)))))), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 16, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = 
tl.load(in_ptr1 + (tmp8 + (16*tmp4) + (256*x4)), xmask, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x7), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/yx/cyxiisvd6357e3nrt4znkmjivtcc45mzwcgpnvrj5jiwbalchths.py # Topologically Sorted Source Nodes: [out_25, out_26, out_27], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] # Source node to ATen node mapping: # out_25 => convolution_7 # out_26 => relu_7 # out_27 => _unsafe_index_19, _unsafe_index_20 # Graph fragment: # %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_18, %primals_16, %primals_17, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {}) # %_unsafe_index_19 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_7, [None, None, %sub_29, None]), kwargs = {}) # %_unsafe_index_20 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_19, [None, None, None, %sub_29]), kwargs = {}) triton_poi_fused_convolution_reflection_pad2d_relu_9 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_relu_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_relu_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 295936 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 34 x1 = (xindex // 34) % 34 x4 = (xindex // 1156) x2 = (xindex // 1156) % 64 x5 = xindex tmp0 = tl.load(in_ptr0 + (1023 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x0))))) + ((-32)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1))))) + (1024*x4)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) 
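    # Fused epilogue for the 64-channel stage: bias-add and ReLU are applied
    # while reading the 32x32 conv output through the 34x34 reflection-pad
    # indexing, so no unpadded intermediate is materialized; xmask guards the
    # tail lanes because 295936 need not divide evenly by XBLOCK.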
tl.store(out_ptr0 + (x5), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/uj/cujb6y5y4osx2vvsp7mk2u3fjeqpsrecy4fu2epgp3awq5idnjyq.py # Topologically Sorted Source Nodes: [out_28], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out_28 => convolution_8 # Graph fragment: # %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_20, %primals_18, %primals_19, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_10 = async_compile.triton('triton_poi_fused_convolution_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 12288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/eo/ceoyvrt7lilhj2azq2emt7bipqc444mts3q5pn2y2v3acs3msegf.py # Topologically Sorted Source Nodes: [out_25, out_26], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_25 => convolution_7 # out_26 => relu_7 # Graph fragment: # %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_18, %primals_16, %primals_17, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {}) # %le_18 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_7, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_11 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics 
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 64 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/dt/cdtdjcn5ao6jckbptzeeuah2yudb27um4uwlvwffbnwtbnhpytvf.py # Topologically Sorted Source Nodes: [out_21, out_22], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_21 => convolution_6 # out_22 => relu_6 # Graph fragment: # %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_15, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {}) # %le_37 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_6, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_12 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 
'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_12(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 64 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ux/cuxkikwlw6hj4upwuoko3b6add2lr74ihu73rd7qw6rcofoytsns.py # Topologically Sorted Source Nodes: [out_18, out_19], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_18 => convolution_5 # out_19 => relu_5 # Graph fragment: # %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_13, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {}) # %le_56 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_5, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_13 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit 
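# Backward-helper kernel: recomputes bias-add + ReLU on the raw conv output and
# stores only the boolean (relu <= 0) mask that aten.threshold_backward uses to
# zero incoming gradients, so the full activation need not be saved.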
def triton_poi_fused_convolution_relu_threshold_backward_13(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 128 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/qk/cqkmpfvaqberctsqs5e4tsqughooqyxlitaje3pdowd5wtcne5tc.py # Topologically Sorted Source Nodes: [out_14, out_15], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_14 => convolution_4 # out_15 => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_10, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) # %le_75 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_14 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_14(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 128 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/ov/covedzte6ka54ajsekyitrd6p6mpp523wulxwtrl63ektefmpuve.py # Topologically Sorted Source Nodes: [out_11, out_12], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_11 => convolution_3 # out_12 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_8, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) # %le_94 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_15 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_15(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 256 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/gy/cgybnsrpcjgnh4rqattwudtwjjam2me24yiscurttis3qbe4diay.py # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_1 => convolution # out_2 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le_151 : 
[num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_16 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_16', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 256 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19 = args args.clear() assert_size_stride(primals_1, (4, 512, 4, 4), (8192, 16, 4, 1)) assert_size_stride(primals_2, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_3, (256, ), (1, )) assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_5, (256, ), (1, )) assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_7, (256, ), (1, )) assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_9, (256, ), (1, )) assert_size_stride(primals_10, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_11, (128, ), (1, )) assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (128, ), (1, )) assert_size_stride(primals_14, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (64, ), (1, )) assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_17, (64, ), (1, )) assert_size_stride(primals_18, (3, 64, 3, 
3), (576, 9, 3, 1)) assert_size_stride(primals_19, (3, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 6, 6), (18432, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 73728, grid=grid(73728), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 256, 4, 4), (4096, 16, 4, 1)) buf2 = empty_strided_cuda((8, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] triton_poi_fused__to_copy_add_arange_mul_1.run(buf2, 8, grid=grid(8), stream=stream0) buf3 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1), torch.float32) # Topologically Sorted Source Nodes: [out_1, out_2, out_3, out_4], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d] triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2.run(buf2, buf1, primals_3, buf3, 102400, grid=grid(102400), stream=stream0) # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 256, 8, 8), (16384, 64, 8, 1)) buf5 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1), torch.float32) # Topologically Sorted Source Nodes: [out_5, out_6, out_7], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] triton_poi_fused_convolution_reflection_pad2d_relu_3.run(buf4, primals_5, buf5, 102400, grid=grid(102400), stream=stream0) # Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 256, 8, 8), (16384, 64, 8, 1)) buf7 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1), torch.float32) # Topologically Sorted Source Nodes: [out_8, out_9, out_10], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] triton_poi_fused_convolution_reflection_pad2d_relu_3.run(buf6, primals_7, buf7, 102400, grid=grid(102400), stream=stream0) # Topologically Sorted Source Nodes: [out_11], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 256, 8, 8), (16384, 64, 8, 1)) buf9 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1), torch.float32) # Topologically Sorted Source Nodes: [out_11, out_12, out_13], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] triton_poi_fused_convolution_reflection_pad2d_relu_3.run(buf8, primals_9, buf9, 102400, grid=grid(102400), stream=stream0) # Topologically Sorted Source Nodes: [out_14], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 128, 8, 8), (8192, 64, 8, 1)) 
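    # Mid-decoder checkpoint: channels have been halved (256 -> 128) at 8x8
    # spatial size; buf11 below is the length-16 nearest-neighbor index map
    # (floor(i * 0.5)) used to upsample to 16x16 before the 18x18 reflection pad.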
buf11 = empty_strided_cuda((16, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [out_16], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] triton_poi_fused__to_copy_add_arange_mul_4.run(buf11, 16, grid=grid(16), stream=stream0) buf12 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) # Topologically Sorted Source Nodes: [out_14, out_15, out_16, out_17], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d] triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5.run(buf11, buf10, primals_11, buf12, 165888, grid=grid(165888), stream=stream0) # Topologically Sorted Source Nodes: [out_18], Original ATen: [aten.convolution] buf13 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 128, 16, 16), (32768, 256, 16, 1)) buf14 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) # Topologically Sorted Source Nodes: [out_18, out_19, out_20], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] triton_poi_fused_convolution_reflection_pad2d_relu_6.run(buf13, primals_13, buf14, 165888, grid=grid(165888), stream=stream0) # Topologically Sorted Source Nodes: [out_21], Original ATen: [aten.convolution] buf15 = extern_kernels.convolution(buf14, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 64, 16, 16), (16384, 256, 16, 1)) buf16 = empty_strided_cuda((32, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [out_23], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] triton_poi_fused__to_copy_add_arange_mul_7.run(buf16, 32, grid=grid(32), stream=stream0) buf17 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32) # Topologically Sorted Source Nodes: [out_21, out_22, out_23, out_24], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d] triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8.run(buf16, buf15, primals_15, buf17, 295936, grid=grid(295936), stream=stream0) # Topologically Sorted Source Nodes: [out_25], Original ATen: [aten.convolution] buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf19 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32) # Topologically Sorted Source Nodes: [out_25, out_26, out_27], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] triton_poi_fused_convolution_reflection_pad2d_relu_9.run(buf18, primals_17, buf19, 295936, grid=grid(295936), stream=stream0) # Topologically Sorted Source Nodes: [out_28], Original ATen: [aten.convolution] buf20 = extern_kernels.convolution(buf19, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 3, 32, 32), (3072, 1024, 32, 1)) buf21 = buf20; del buf20 # reuse # Topologically Sorted Source Nodes: [out_28], Original ATen: [aten.convolution] triton_poi_fused_convolution_10.run(buf21, primals_19, 12288, grid=grid(12288), stream=stream0) del primals_19 buf22 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) # Topologically Sorted Source Nodes: [out_25, out_26], 
Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_11.run(buf18, primals_17, buf22, 262144, grid=grid(262144), stream=stream0) del buf18 del primals_17 buf23 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.bool) # Topologically Sorted Source Nodes: [out_21, out_22], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_12.run(buf15, primals_15, buf23, 65536, grid=grid(65536), stream=stream0) del buf15 del primals_15 buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) # Topologically Sorted Source Nodes: [out_18, out_19], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_13.run(buf13, primals_13, buf24, 131072, grid=grid(131072), stream=stream0) del buf13 del primals_13 buf25 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool) # Topologically Sorted Source Nodes: [out_14, out_15], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_14.run(buf10, primals_11, buf25, 32768, grid=grid(32768), stream=stream0) del buf10 del primals_11 buf26 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool) # Topologically Sorted Source Nodes: [out_11, out_12], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_15.run(buf8, primals_9, buf26, 65536, grid=grid(65536), stream=stream0) del buf8 del primals_9 buf27 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool) # Topologically Sorted Source Nodes: [out_8, out_9], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_15.run(buf6, primals_7, buf27, 65536, grid=grid(65536), stream=stream0) del buf6 del primals_7 buf28 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool) # Topologically Sorted Source Nodes: [out_5, out_6], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_15.run(buf4, primals_5, buf28, 65536, grid=grid(65536), stream=stream0) del buf4 del primals_5 buf29 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_16.run(buf1, primals_3, buf29, 16384, grid=grid(16384), stream=stream0) del buf1 del primals_3 return (buf21, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, buf0, buf2, buf3, buf5, buf7, buf9, buf11, buf12, buf14, buf16, buf17, buf19, buf22, buf23, buf24, buf25, buf26, buf27, buf28, buf29, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 512, 4, 4), (8192, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((256, 
256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((3, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
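# ---------------------------------------------------------------------------
# A minimal standalone sketch (not part of the generated module above) checking
# the closed-form reflection-pad indexing the fused kernels inline, e.g.
# 15 + (-1)*abs(-3 + abs(-1 + x0)) + (-4)*abs(-3 + abs(-1 + x1)) + 16*x2 for a
# 4x4 plane padded by 1, against torch.nn.functional.pad(mode='reflect'). The
# helper name reflect_index and the 4x4/pad=1 sizes are illustrative
# assumptions, not identifiers from the compiled code.
import torch
import torch.nn.functional as F


def reflect_index(x, size, pad=1):
    # closed form used by the generated kernels for a symmetric pad:
    # src = (size - 1) - |(size - 1) - |x - pad||
    return (size - 1) - abs((size - 1) - abs(x - pad))


_inp = torch.arange(16.0).view(1, 1, 4, 4)
_ref = F.pad(_inp, (1, 1, 1, 1), mode='reflect')
_out = torch.empty(1, 1, 6, 6)
for _r in range(6):
    for _c in range(6):
        _out[0, 0, _r, _c] = _inp[0, 0, reflect_index(_r, 4), reflect_index(_c, 4)]
assert torch.equal(_out, _ref)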
import torch import torch.nn as nn class decoder4(nn.Module): def __init__(self): super(decoder4, self).__init__() self.reflecPad11 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv11 = nn.Conv2d(512, 256, 3, 1, 0) self.relu11 = nn.ReLU(inplace=True) self.unpool = nn.UpsamplingNearest2d(scale_factor=2) self.reflecPad12 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv12 = nn.Conv2d(256, 256, 3, 1, 0) self.relu12 = nn.ReLU(inplace=True) self.reflecPad13 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv13 = nn.Conv2d(256, 256, 3, 1, 0) self.relu13 = nn.ReLU(inplace=True) self.reflecPad14 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv14 = nn.Conv2d(256, 256, 3, 1, 0) self.relu14 = nn.ReLU(inplace=True) self.reflecPad15 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv15 = nn.Conv2d(256, 128, 3, 1, 0) self.relu15 = nn.ReLU(inplace=True) self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2) self.reflecPad16 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv16 = nn.Conv2d(128, 128, 3, 1, 0) self.relu16 = nn.ReLU(inplace=True) self.reflecPad17 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv17 = nn.Conv2d(128, 64, 3, 1, 0) self.relu17 = nn.ReLU(inplace=True) self.unpool3 = nn.UpsamplingNearest2d(scale_factor=2) self.reflecPad18 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv18 = nn.Conv2d(64, 64, 3, 1, 0) self.relu18 = nn.ReLU(inplace=True) self.reflecPad19 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv19 = nn.Conv2d(64, 3, 3, 1, 0) def forward(self, x): out = self.reflecPad11(x) out = self.conv11(out) out = self.relu11(out) out = self.unpool(out) out = self.reflecPad12(out) out = self.conv12(out) out = self.relu12(out) out = self.reflecPad13(out) out = self.conv13(out) out = self.relu13(out) out = self.reflecPad14(out) out = self.conv14(out) out = self.relu14(out) out = self.reflecPad15(out) out = self.conv15(out) out = self.relu15(out) out = self.unpool2(out) out = self.reflecPad16(out) out = self.conv16(out) out = self.relu16(out) out = self.reflecPad17(out) out = self.conv17(out) out = self.relu17(out) out = self.unpool3(out) out = self.reflecPad18(out) out = self.conv18(out) out = self.relu18(out) out = self.reflecPad19(out) out = self.conv19(out) return out def get_inputs(): return [torch.rand([4, 512, 4, 4])] def get_init_inputs(): return [[], {}]
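A minimal shape check (illustrative, using the decoder4 class defined above) makes the geometry explicit: every ReflectionPad2d(1) + 3x3/stride-1 convolution pair preserves the spatial size, and the three nearest-neighbour unpooling stages double it, so a 4x4 input with 512 channels comes out as a 32x32 RGB image:

import torch

dec = decoder4()
with torch.no_grad():
    out = dec(torch.rand(4, 512, 4, 4))
# spatial: 4 -> 8 -> 16 -> 32; channels: 512 -> 256 -> 128 -> 64 -> 3
assert out.shape == (4, 3, 32, 32)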
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), None, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, None) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 10 % 10 x0 = xindex % 10 x4 = xindex // 100 x2 = xindex // 100 % 256 x7 = xindex tmp0 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x1 ))), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0 ))), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 4 * tmp4 + 16 * x4), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x7, tmp13, None) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 10 x1 = xindex // 10 % 10 x4 = xindex // 100 x2 = xindex // 100 % 256 x5 = xindex tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0)) + -8 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 64 * x4), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x5, tmp4, None) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5(in_ptr0, 
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 18 % 18 x0 = xindex % 18 x4 = xindex // 324 x2 = xindex // 324 % 128 x7 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x1))), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x0))), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 8, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 8 * tmp4 + 64 * x4), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x7, tmp13, None) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 18 x1 = xindex // 18 % 18 x4 = xindex // 324 x2 = xindex // 324 % 128 x5 = xindex tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x4), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x5, tmp4, None) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_7(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 295936 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 34 % 34 x0 = xindex % 34 x4 = xindex // 1156 x2 = xindex // 1156 % 64 x7 = xindex tmp0 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x1))), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x0))), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 16, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 16 * tmp4 + 256 * x4), xmask, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x7, tmp13, xmask) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 295936 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 34 x1 = xindex // 34 % 34 x4 = xindex // 1156 x2 = xindex // 1156 % 64 x5 = xindex tmp0 = tl.load(in_ptr0 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x0)) + -32 * tl_math.abs(-31 + 
tl_math.abs(-1 + x1)) + 1024 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x5, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 3 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_12(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_13(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_14(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_15(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19) = args args.clear() assert_size_stride(primals_1, (4, 512, 4, 4), (8192, 16, 4, 1)) assert_size_stride(primals_2, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_3, (256,), (1,)) assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (64,), (1,)) assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_17, (64,), (1,)) assert_size_stride(primals_18, (3, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_19, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 6, 6), (18432, 36, 6, 1), torch. 
float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(73728)](primals_1, buf0, 73728, XBLOCK=1024, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 256, 4, 4), (4096, 16, 4, 1)) buf2 = empty_strided_cuda((8,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_1[grid(8)](buf2, 8, XBLOCK =8, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1), torch.float32) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2[grid (102400)](buf2, buf1, primals_3, buf3, 102400, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 256, 8, 8), (16384, 64, 8, 1)) buf5 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(102400)](buf4 , primals_5, buf5, 102400, XBLOCK=512, num_warps=8, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 256, 8, 8), (16384, 64, 8, 1)) buf7 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(102400)](buf6 , primals_7, buf7, 102400, XBLOCK=512, num_warps=8, num_stages=1) buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 256, 8, 8), (16384, 64, 8, 1)) buf9 = empty_strided_cuda((4, 256, 10, 10), (25600, 100, 10, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(102400)](buf8 , primals_9, buf9, 102400, XBLOCK=512, num_warps=8, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 128, 8, 8), (8192, 64, 8, 1)) buf11 = empty_strided_cuda((16,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_4[grid(16)](buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5[grid (165888)](buf11, buf10, primals_11, buf12, 165888, XBLOCK=1024, num_warps=4, num_stages=1) buf13 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 128, 16, 16), (32768, 256, 16, 1)) buf14 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_6[grid(165888)]( buf13, primals_13, buf14, 165888, XBLOCK=512, num_warps=8, num_stages=1) buf15 = extern_kernels.convolution(buf14, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 64, 16, 16), (16384, 256, 16, 1)) buf16 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_7[grid(32)](buf16, 32, XBLOCK=32, num_warps=1, num_stages=1) buf17 = 
empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8[grid (295936)](buf16, buf15, primals_15, buf17, 295936, XBLOCK=1024, num_warps=4, num_stages=1) buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf19 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_9[grid(295936)]( buf18, primals_17, buf19, 295936, XBLOCK=512, num_warps=8, num_stages=1) buf20 = extern_kernels.convolution(buf19, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 3, 32, 32), (3072, 1024, 32, 1)) buf21 = buf20 del buf20 triton_poi_fused_convolution_10[grid(12288)](buf21, primals_19, 12288, XBLOCK=256, num_warps=4, num_stages=1) del primals_19 buf22 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_11[grid(262144)]( buf18, primals_17, buf22, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del buf18 del primals_17 buf23 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_12[grid(65536)]( buf15, primals_15, buf23, 65536, XBLOCK=512, num_warps=4, num_stages=1) del buf15 del primals_15 buf24 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_13[grid(131072)]( buf13, primals_13, buf24, 131072, XBLOCK=512, num_warps=8, num_stages=1) del buf13 del primals_13 buf25 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool ) triton_poi_fused_convolution_relu_threshold_backward_14[grid(32768)]( buf10, primals_11, buf25, 32768, XBLOCK=256, num_warps=4, num_stages=1) del buf10 del primals_11 buf26 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .bool) triton_poi_fused_convolution_relu_threshold_backward_15[grid(65536)]( buf8, primals_9, buf26, 65536, XBLOCK=256, num_warps=4, num_stages=1) del buf8 del primals_9 buf27 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .bool) triton_poi_fused_convolution_relu_threshold_backward_15[grid(65536)]( buf6, primals_7, buf27, 65536, XBLOCK=256, num_warps=4, num_stages=1) del buf6 del primals_7 buf28 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .bool) triton_poi_fused_convolution_relu_threshold_backward_15[grid(65536)]( buf4, primals_5, buf28, 65536, XBLOCK=256, num_warps=4, num_stages=1) del buf4 del primals_5 buf29 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool ) triton_poi_fused_convolution_relu_threshold_backward_16[grid(16384)]( buf1, primals_3, buf29, 16384, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_3 return (buf21, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, buf0, buf2, buf3, buf5, buf7, buf9, buf11, buf12, buf14, buf16, buf17, buf19, buf22, buf23, buf24, buf25, buf26, buf27, buf28, buf29) class decoder4New(nn.Module): def __init__(self): super(decoder4New, self).__init__() self.reflecPad11 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv11 = nn.Conv2d(512, 256, 3, 1, 0) self.relu11 = nn.ReLU(inplace=True) self.unpool = nn.UpsamplingNearest2d(scale_factor=2) self.reflecPad12 = 
nn.ReflectionPad2d((1, 1, 1, 1)) self.conv12 = nn.Conv2d(256, 256, 3, 1, 0) self.relu12 = nn.ReLU(inplace=True) self.reflecPad13 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv13 = nn.Conv2d(256, 256, 3, 1, 0) self.relu13 = nn.ReLU(inplace=True) self.reflecPad14 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv14 = nn.Conv2d(256, 256, 3, 1, 0) self.relu14 = nn.ReLU(inplace=True) self.reflecPad15 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv15 = nn.Conv2d(256, 128, 3, 1, 0) self.relu15 = nn.ReLU(inplace=True) self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2) self.reflecPad16 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv16 = nn.Conv2d(128, 128, 3, 1, 0) self.relu16 = nn.ReLU(inplace=True) self.reflecPad17 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv17 = nn.Conv2d(128, 64, 3, 1, 0) self.relu17 = nn.ReLU(inplace=True) self.unpool3 = nn.UpsamplingNearest2d(scale_factor=2) self.reflecPad18 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv18 = nn.Conv2d(64, 64, 3, 1, 0) self.relu18 = nn.ReLU(inplace=True) self.reflecPad19 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv19 = nn.Conv2d(64, 3, 3, 1, 0) def forward(self, input_0): primals_2 = self.conv11.weight primals_3 = self.conv11.bias primals_4 = self.conv12.weight primals_5 = self.conv12.bias primals_6 = self.conv13.weight primals_7 = self.conv13.bias primals_8 = self.conv14.weight primals_9 = self.conv14.bias primals_10 = self.conv15.weight primals_11 = self.conv15.bias primals_12 = self.conv16.weight primals_13 = self.conv16.bias primals_14 = self.conv17.weight primals_15 = self.conv17.bias primals_16 = self.conv18.weight primals_17 = self.conv18.bias primals_18 = self.conv19.weight primals_19 = self.conv19.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19]) return output[0]
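decoder4New reuses the eager module's parameter names, so its state dict is interchangeable with decoder4's. A hedged equivalence sketch (this assumes a CUDA device, since the generated kernels are compiled for cuda:0, and the tolerances are illustrative):

import torch

ref, compiled = decoder4().cuda().eval(), decoder4New().cuda().eval()
compiled.load_state_dict(ref.state_dict())  # identical submodule names
x = torch.rand(4, 512, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(ref(x), compiled(x), rtol=1e-4, atol=1e-4)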
czczup/URST
decoder4
false
15109
[ "Apache-2.0" ]
119
000ec9f7728f12ffad989ec1d07b1dd579514133
https://github.com/czczup/URST/tree/000ec9f7728f12ffad989ec1d07b1dd579514133
CrossEntropyLossWithAuxiliary
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # loss => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = 
tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/t2/ct2dbabladhyyceg2gmfqrslgo4edv7x6gs7iscumud7suileuje.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div] # Source node to ATen node mapping: # loss => div, exp, log, mul, neg, sub_1, sum_1, sum_2 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg1_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 64), kwargs = {}) triton_per_fused__log_softmax_div_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_mul_neg_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = 
tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r3), None) tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (r3), None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div] triton_per_fused__log_softmax_div_mul_neg_sum_1.run(buf2, buf0, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg1_1 del buf0 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
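The two kernels implement the numerically stable log-softmax split log_softmax(x) = (x - max(x)) - log(sum(exp(x - max(x)))): the pointwise kernel materialises x minus the per-position class maximum, and the persistent reduction finishes the log-sum-exp, multiplies by the soft target, and reduces. An eager-mode check of the same decomposition (shapes match the benchmark inputs):

import torch

x = torch.rand(4, 4, 4, 4)
shifted = x - x.amax(dim=1, keepdim=True)                      # kernel 0
logp = shifted - shifted.exp().sum(dim=1, keepdim=True).log()  # kernel 1
torch.testing.assert_close(logp, torch.log_softmax(x, dim=1))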
import torch import torch.nn as nn import torch.nn.parallel from torch.optim.lr_scheduler import * from torchvision.models import * from torchvision.transforms import * class CrossEntropyLossWithAuxiliary(nn.CrossEntropyLoss): """Cross-entropy loss that can add auxiliary loss if present.""" def forward(self, input, target): """Return cross-entropy loss and add auxiliary loss if possible.""" if isinstance(input, dict): loss = super().forward(input['out'], target) if 'aux' in input: loss += 0.5 * super().forward(input['aux'], target) else: loss = super().forward(input, target) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
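A hypothetical usage sketch: torchvision's segmentation models (e.g. FCN, DeepLabV3) return a dict with 'out' and, when an auxiliary classifier is enabled, 'aux', which is the case this loss targets; the tensors and shapes below are made up for illustration:

import torch

criterion = CrossEntropyLossWithAuxiliary()
logits = {'out': torch.randn(2, 4, 8, 8), 'aux': torch.randn(2, 4, 8, 8)}
target = torch.randint(0, 4, (2, 8, 8))
loss = criterion(logits, target)  # CE(out, target) + 0.5 * CE(aux, target)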
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.parallel from torch.optim.lr_scheduler import * from torchvision.models import * from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + r3, None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf2, buf0, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg1_1 del buf0 return buf2, class CrossEntropyLossWithAuxiliaryNew(nn.CrossEntropyLoss): """Cross-entropy loss that can add auxiliary loss if present.""" def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
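The literal 0.015625 in triton_per_fused__log_softmax_div_mul_neg_sum_1 is 1/64, the div-by-64 from the graph fragment above: with probability ("soft") targets, as produced by get_inputs(), nn.CrossEntropyLoss(reduction='mean') averages over the 4*4*4 = 64 non-class positions, and Inductor folds the division into a multiply. A sketch of the same computation in eager mode:

import torch
import torch.nn.functional as F

x, t = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
manual = -(torch.log_softmax(x, dim=1) * t).sum() * 0.015625  # the kernel's math
torch.testing.assert_close(manual, F.cross_entropy(x, t))     # soft-target path
assert 1 / (4 * 4 * 4) == 0.015625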
dani3l125/torchprune
CrossEntropyLossWithAuxiliary
false
15110
[ "MIT" ]
74
f2589ec7514bd531ddaa7da3aed6388bb13712d3
https://github.com/dani3l125/torchprune/tree/f2589ec7514bd531ddaa7da3aed6388bb13712d3
SelfAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/cv/ccvw7in32scm7p2wxzcui3kswndskof6eozsle2lhskfuyjca4ot.py # Topologically Sorted Source Nodes: [sqrt, scaled_dot], Original ATen: [aten.sqrt, aten.div] # Source node to ATen node mapping: # scaled_dot => div # sqrt => full_default # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm, %full_default), kwargs = {}) triton_poi_fused_div_sqrt_0 = async_compile.triton('triton_poi_fused_div_sqrt_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sqrt_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sqrt_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = 
xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [queries], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [keys], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dot], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf2) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [sqrt, scaled_dot], Original ATen: [aten.sqrt, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_sqrt_0.run(buf3, 64, grid=grid(64), stream=stream0) return (buf3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class SelfAttention(nn.Module): """A simple self-attention solution.""" def __init__(self, data_dim, dim_q): super(SelfAttention, self).__init__() self._layers = [] self._fc_q = nn.Linear(data_dim, dim_q) self._layers.append(self._fc_q) self._fc_k = nn.Linear(data_dim, dim_q) self._layers.append(self._fc_k) def forward(self, input_data): _b, _t, k = input_data.size() queries = self._fc_q(input=input_data) keys = self._fc_k(input=input_data) dot = torch.bmm(queries, keys.transpose(1, 2)) scaled_dot = torch.div(dot, torch.sqrt(torch.tensor(k).float())) return scaled_dot @property def layers(self): return self._layers def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'data_dim': 4, 'dim_q': 4}]
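The only Triton kernel here hard-codes tmp1 = 0.5 because the scale is known at compile time: k = data_dim = 4, so dividing by sqrt(k) is the same as multiplying by 1/sqrt(4) = 0.5; the two linear projections and the bmm stay in extern kernels. An illustrative check:

import math
import torch

attn = SelfAttention(data_dim=4, dim_q=4)
scores = attn(torch.rand(4, 4, 4))
assert scores.shape == (4, 4, 4)
assert 1 / math.sqrt(4) == 0.5  # the constant baked into the kernel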
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_sqrt_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf2) buf3 = buf2 del buf2 get_raw_stream(0) triton_poi_fused_div_sqrt_0[grid(64)](buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) class SelfAttentionNew(nn.Module): """A simple self-attention solution.""" def __init__(self, data_dim, dim_q): super(SelfAttentionNew, self).__init__() self._layers = [] self._fc_q = nn.Linear(data_dim, dim_q) self._layers.append(self._fc_q) self._fc_k = nn.Linear(data_dim, dim_q) self._layers.append(self._fc_k) @property def layers(self): return self._layers def forward(self, input_0): primals_2 = self._fc_q.weight primals_3 = self._fc_q.bias primals_4 = self._fc_k.weight primals_5 = self._fc_k.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
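Note that keys.transpose(1, 2) never materialises in the compiled wrapper: reinterpret_tensor hands buf1 to bmm with strides (16, 1, 4) instead of the contiguous (16, 4, 1), so the transpose is free. The same zero-copy view in plain PyTorch (illustrative):

import torch

k = torch.rand(4, 4, 4)                   # contiguous strides: (16, 4, 1)
kt = k.as_strided((4, 4, 4), (16, 1, 4))  # what reinterpret_tensor does
assert torch.equal(kt, k.transpose(1, 2))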
daia99/brain-tokyo-workshop
SelfAttention
false
15111
[ "Apache-2.0" ]
1097
cd470255230afddba2b80d99a9641b682f4d0762
https://github.com/daia99/brain-tokyo-workshop/tree/cd470255230afddba2b80d99a9641b682f4d0762
FPNOutput
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) return (buf2, primals_1, primals_3, primals_4, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class ConvBNReLU(nn.Module): def __init__(self, in_chan, out_chan, ks=1, stride=1, padding=0, norm_layer=None, bias=True, *args, **kwargs): super(ConvBNReLU, self).__init__() self.conv = nn.Conv2d(in_chan, out_chan, kernel_size=ks, stride= stride, padding=padding, bias=bias) self.norm_layer = norm_layer if norm_layer is not None: self.bn = norm_layer(out_chan, activation='leaky_relu') self.init_weight() def forward(self, x): x = self.conv(x) if self.norm_layer is not None: x = self.bn(x) return x def init_weight(self): for ly in self.children(): if isinstance(ly, nn.Conv2d): nn.init.kaiming_normal_(ly.weight, a=1) if ly.bias is not None: nn.init.constant_(ly.bias, 0) class FPNOutput(nn.Module): def __init__(self, in_chan, mid_chan, n_classes, norm_layer=None, *args, **kwargs): super(FPNOutput, self).__init__() self.norm_layer = norm_layer self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1, norm_layer=norm_layer) self.conv_out = nn.Conv2d(mid_chan, n_classes, kernel_size=1, bias= False) self.init_weight() def forward(self, x): x = self.conv(x) x = self.conv_out(x) return x def init_weight(self): for ly in self.children(): if isinstance(ly, nn.Conv2d): nn.init.kaiming_normal_(ly.weight, a=1) if ly.bias is not None: nn.init.constant_(ly.bias, 0) def get_params(self): wd_params, nowd_params = [], [] for name, module in self.named_modules(): if isinstance(module, (nn.Linear, nn.Conv2d)): wd_params.append(module.weight) if module.bias is not None: nowd_params.append(module.bias) elif isinstance(module, self.norm_layer): nowd_params += list(module.parameters()) return wd_params, nowd_params def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_chan': 4, 'mid_chan': 4, 'n_classes': 4}]
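A quick shape sanity check (illustrative; the sizes mirror get_init_inputs/get_inputs above): the 3x3/padding-1 convolution preserves the spatial size and the bias-free 1x1 convolution maps mid_chan to n_classes, so the head only changes the channel count.

import torch

head = FPNOutput(in_chan=4, mid_chan=4, n_classes=4)
out = head(torch.rand(4, 4, 4, 4))
assert out.shape == (4, 4, 4, 4)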
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) return buf2, primals_1, primals_3, primals_4, buf1 class ConvBNReLU(nn.Module): def __init__(self, in_chan, out_chan, ks=1, stride=1, padding=0, norm_layer=None, bias=True, *args, **kwargs): super(ConvBNReLU, self).__init__() self.conv = nn.Conv2d(in_chan, out_chan, kernel_size=ks, stride= stride, padding=padding, bias=bias) self.norm_layer = norm_layer if norm_layer is not None: self.bn = norm_layer(out_chan, activation='leaky_relu') self.init_weight() def forward(self, x): x = self.conv(x) if self.norm_layer is not None: x = self.bn(x) return x def init_weight(self): for ly in self.children(): if isinstance(ly, nn.Conv2d): nn.init.kaiming_normal_(ly.weight, a=1) if ly.bias is not None: nn.init.constant_(ly.bias, 0) class FPNOutputNew(nn.Module): def __init__(self, in_chan, mid_chan, n_classes, norm_layer=None, *args, **kwargs): super(FPNOutputNew, self).__init__() self.norm_layer = norm_layer self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1, norm_layer=norm_layer) self.conv_out = nn.Conv2d(mid_chan, n_classes, kernel_size=1, bias= False) self.init_weight() def init_weight(self): for ly in self.children(): if isinstance(ly, nn.Conv2d): nn.init.kaiming_normal_(ly.weight, a=1) if ly.bias is not None: nn.init.constant_(ly.bias, 0) def get_params(self): wd_params, nowd_params = [], [] for name, module in self.named_modules(): if isinstance(module, (nn.Linear, nn.Conv2d)): wd_params.append(module.weight) if module.bias is not None: nowd_params.append(module.bias) elif isinstance(module, self.norm_layer): nowd_params += list(module.parameters()) return wd_params, nowd_params def forward(self, input_0): primals_1 = self.conv.conv.weight primals_2 = self.conv.conv.bias primals_4 = self.conv_out.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
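Both convolutions are dispatched to extern_kernels.convolution with bias=None; the single Triton kernel, triton_poi_fused_convolution_0, is just the in-place bias add on the first convolution's output. In eager terms (a sketch of the same split):

import torch
import torch.nn.functional as F

conv = torch.nn.Conv2d(4, 4, 3, padding=1)
x = torch.rand(4, 4, 4, 4)
y = F.conv2d(x, conv.weight, bias=None, padding=1)  # extern kernel, no bias
y = y + conv.bias.view(1, -1, 1, 1)                 # the fused Triton bias add
torch.testing.assert_close(y, conv(x))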
dani3l125/TDNet
FPNOutput
false
15,112
[ "MIT" ]
195
3f8b5378fcc7f97c26b3760ddaf3d4402cf477d1
https://github.com/dani3l125/TDNet/tree/3f8b5378fcc7f97c26b3760ddaf3d4402cf477d1
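For orientation, FPNOutputNew above is meant to be numerically interchangeable with the eager FPNOutput when norm_layer is None: its forward merely reroutes the module's own weights into call(). A minimal sketch of how one might verify that, assuming a CUDA device and the 4-channel shapes from get_init_inputs()/get_inputs() (the tolerance is an arbitrary choice, not from the source):

import torch

ref = FPNOutput(in_chan=4, mid_chan=4, n_classes=4).cuda()
new = FPNOutputNew(in_chan=4, mid_chan=4, n_classes=4).cuda()
new.load_state_dict(ref.state_dict())  # share one set of weights

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    # conv (3x3, bias fused in the Triton kernel) -> conv_out (1x1, no bias)
    assert torch.allclose(ref(x), new(x), atol=1e-6)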
NSELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/oh/cohg3x6fijipddh3u7z33t4gs723aychh4zunppiqsjdoxakrsqc.py # Topologically Sorted Source Nodes: [add, pow_2, weights, sub, squared_error, scaled_loss, mean], Original ATen: [aten.add, aten.pow, aten.reciprocal, aten.mul, aten.sub, aten.mean] # Source node to ATen node mapping: # add => add # mean => mean # pow_2 => pow_2 # scaled_loss => mul_1 # squared_error => pow_1 # sub => sub # weights => mul, reciprocal # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg2_1, 0.1), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2), kwargs = {}) # %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%pow_2,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %pow_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_1,), kwargs = {}) triton_per_fused_add_mean_mul_pow_reciprocal_sub_0 = async_compile.triton('triton_per_fused_add_mean_mul_pow_reciprocal_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_pow_reciprocal_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mean_mul_pow_reciprocal_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp8 = tl.load(in_ptr1 + (r0), None) tmp9 = tl.load(in_ptr2 + (r0), None) tmp1 = 0.1 tmp2 = tmp0 + tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.full([1], 1, tl.int32) tmp5 = tmp4 / tmp3 tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp10 = tmp8 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tmp7 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [add, pow_2, weights, sub, squared_error, scaled_loss, mean], Original ATen: [aten.add, aten.pow, aten.reciprocal, aten.mul, aten.sub, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_mean_mul_pow_reciprocal_sub_0.run(buf1, arg2_1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


class NSELoss(torch.nn.Module):
    """Calculate (batch-wise) NSE Loss.

    Each sample i is weighted by 1 / (std_i + eps)^2, where std_i is the
    standard deviation of the discharge of the basin to which the sample
    belongs.

    Parameters
    ----------
    eps : float
        Constant added to the weight for numerical stability and smoothing,
        defaults to 0.1
    """

    def __init__(self, eps: 'float'=0.1):
        super(NSELoss, self).__init__()
        self.eps = eps

    def forward(self, y_pred: 'torch.Tensor', y_true: 'torch.Tensor',
        q_stds: 'torch.Tensor'):
        """Calculate the batch-wise NSE Loss function.

        Parameters
        ----------
        y_pred : torch.Tensor
            Tensor containing the network prediction.
        y_true : torch.Tensor
            Tensor containing the true discharge values.
        q_stds : torch.Tensor
            Tensor containing the discharge std (calculated over the
            training period) of each sample.

        Returns
        -------
        torch.Tensor
            The (batch-wise) NSE Loss
        """
        squared_error = (y_pred - y_true) ** 2
        weights = 1 / (q_stds + self.eps) ** 2
        scaled_loss = weights * squared_error
        return torch.mean(scaled_loss)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_mean_mul_pow_reciprocal_sub_0(in_out_ptr0,
    in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp8 = tl.load(in_ptr1 + r0, None)
    tmp9 = tl.load(in_ptr2 + r0, None)
    tmp1 = 0.1
    tmp2 = tmp0 + tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tl.full([1], 1, tl.int32)
    tmp5 = tmp4 / tmp3
    tmp6 = 1.0
    tmp7 = tmp5 * tmp6
    tmp10 = tmp8 - tmp9
    tmp11 = tmp10 * tmp10
    tmp12 = tmp7 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
    tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
    tmp16 = 256.0
    tmp17 = tmp15 / tmp16
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_mean_mul_pow_reciprocal_sub_0[grid(1)](buf1,
            arg2_1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
    return buf1,


class NSELossNew(torch.nn.Module):
    """Calculate (batch-wise) NSE Loss.

    Each sample i is weighted by 1 / (std_i + eps)^2, where std_i is the
    standard deviation of the discharge of the basin to which the sample
    belongs.

    Parameters
    ----------
    eps : float
        Constant added to the weight for numerical stability and smoothing,
        defaults to 0.1
    """

    def __init__(self, eps: 'float'=0.1):
        super(NSELossNew, self).__init__()
        self.eps = eps

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
danielsuo/toy_flood
NSELoss
false
15,113
[ "MIT" ]
49
471d3c4091d86d4a00fbf910937d4e60fdaf79a1
https://github.com/danielsuo/toy_flood/tree/471d3c4091d86d4a00fbf910937d4e60fdaf79a1
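As a concrete check of the weighting in the NSELoss docstring: with eps = 0.1 and a per-sample discharge std of 0.4, every squared error is scaled by 1 / (0.4 + 0.1)^2 = 4. A minimal sketch using the eager class (illustrative values, not from the source):

import torch

loss_fn = NSELoss(eps=0.1)
y_pred = torch.tensor([1.0, 2.0])
y_true = torch.tensor([0.0, 2.0])
q_stds = torch.tensor([0.4, 0.4])  # weight = 1 / (0.4 + 0.1) ** 2 = 4.0

# mean(4 * [(1 - 0) ** 2, (2 - 2) ** 2]) = mean([4.0, 0.0]) = 2.0
print(loss_fn(y_pred, y_true, q_stds))  # tensor(2.)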
MLP_CRITIC
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py # Topologically Sorted Source Nodes: [h], Original ATen: [aten.cat] # Source node to ATen node mapping: # h => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/5n/c5nsofekij6a7ap52g7x25mzkhruvzcsi6l2zpm7nkpnzba6k7fc.py # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # h_1 => gt, mul, where # Graph fragment: # %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_4), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_tensor, %mul), kwargs = {}) triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x2), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = 
empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf2, primals_4, 16, grid=grid(16), stream=stream0) del primals_4 buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_6 return (buf4, buf0, buf2, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn def weights_init(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.02) m.bias.data.fill_(0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) class MLP_CRITIC(nn.Module): def __init__(self, opt): super(MLP_CRITIC, self).__init__() self.fc1 = nn.Linear(opt.resSize + opt.attSize, opt.ndh) self.fc2 = nn.Linear(opt.ndh, 1) self.lrelu = nn.LeakyReLU(0.2, True) self.apply(weights_init) def forward(self, x, att): h = torch.cat((x, att), 1) h = self.lrelu(self.fc1(h)) h = self.fc2(h) return h def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'opt': _mock_config(resSize=4, attSize=4, ndh=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x2, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8 ), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_leaky_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_4 buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_6 return buf4, buf0, buf2, primals_5 def weights_init(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.02) m.bias.data.fill_(0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) class MLP_CRITICNew(nn.Module): def __init__(self, opt): super(MLP_CRITICNew, self).__init__() self.fc1 = nn.Linear(opt.resSize + opt.attSize, opt.ndh) self.fc2 = nn.Linear(opt.ndh, 1) self.lrelu = nn.LeakyReLU(0.2, True) self.apply(weights_init) def forward(self, input_0, input_1): primals_3 = self.fc1.weight primals_4 = self.fc1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
Huihui-z/CE-GZSL
MLP_CRITIC
false
15,114
[ "MIT" ]
58
7bf5358ac4727ea1dc2dc9dec2f453b014500bd8
https://github.com/Huihui-z/CE-GZSL/tree/7bf5358ac4727ea1dc2dc9dec2f453b014500bd8
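MLP_CRITIC only needs an opt object exposing resSize, attSize and ndh, so it can be exercised without the paritybench _mock_config helper; a minimal sketch using types.SimpleNamespace (a stand-in chosen here for illustration):

import types
import torch

opt = types.SimpleNamespace(resSize=4, attSize=4, ndh=4)
critic = MLP_CRITIC(opt)

x = torch.rand(4, 4)    # visual features
att = torch.rand(4, 4)  # attribute embeddings
score = critic(x, att)  # cat -> fc1 -> LeakyReLU(0.2) -> fc2
print(score.shape)      # torch.Size([4, 1])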
OhemCELoss2D
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py # Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # cross_entropy => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + 
(x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ab/caba3uf64jnai465yq6lum26mopqu2jzfyp7afwq2pz25mmdxxwl.py # Topologically Sorted Source Nodes: [sort], Original ATen: [aten.sort] # Source node to ATen node mapping: # sort => sort # Graph fragment: # %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%view, -1, True), kwargs = {}) triton_per_fused_sort_1 = async_compile.triton('triton_per_fused_sort_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sort_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_sort_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + ((64*(r0 // 16)) + (r0 % 16)), None) tmp2 = tl.load(in_ptr0 + (16 + (64*(r0 // 16)) + (r0 % 16)), None) tmp5 = tl.load(in_ptr0 + (32 + (64*(r0 // 16)) + (r0 % 16)), None) tmp8 = tl.load(in_ptr0 + (48 + (64*(r0 // 16)) + (r0 % 16)), None) tmp13 = tl.load(in_ptr1 + ((64*(r0 // 16)) + (r0 % 16)), None) tmp16 = tl.load(in_ptr1 + (16 + (64*(r0 // 16)) + (r0 % 16)), None) tmp20 = tl.load(in_ptr1 + (32 + (64*(r0 // 16)) + (r0 % 16)), None) tmp24 = tl.load(in_ptr1 + (48 + (64*(r0 // 16)) + (r0 % 16)), None) tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 
- tmp11 tmp14 = tmp12 * tmp13 tmp15 = tmp2 - tmp11 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp19 = tmp5 - tmp11 tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp23 = tmp8 - tmp11 tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp27 = -tmp26 tmp28 = r0 tmp29 = tmp28.to(tl.int16) tmp30 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp31 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK]) tmp32, tmp33, = triton_helpers.sort_with_index(tmp30, tmp31, None, 1, stable=False, descending=True) tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp32, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/zm/czmpmeec2ynbuwj3p6c7emzjovwazeojvnpmpa5w5yc6z2ipitfs.py # Topologically Sorted Source Nodes: [gt], Original ATen: [aten.gt] # Source node to ATen node mapping: # gt => gt # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%select, 0.35667494393873245), kwargs = {}) triton_poi_fused_gt_2 = async_compile.triton('triton_poi_fused_gt_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gt_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gt_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + (4)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = 0.35667494393873245 tmp3 = tmp1 > tmp2 tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp3, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg1_1 buf1 = empty_strided_cuda((64, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [sort], Original ATen: [aten.sort] triton_per_fused_sort_1.run(buf0, arg0_1, buf1, 1, 64, 
grid=grid(1), stream=stream0) del arg0_1 del buf0 buf3 = empty_strided_cuda((), (), torch.bool) # Topologically Sorted Source Nodes: [gt], Original ATen: [aten.gt] triton_poi_fused_gt_2.run(buf1, buf3, 1, grid=grid(1), stream=stream0) return (buf1, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn as nn


class OhemCELoss2D(nn.CrossEntropyLoss):
    """2D Cross Entropy Loss with Auxiliary Loss"""

    def __init__(self, n_min, thresh=0.7, ignore_index=-1):
        super(OhemCELoss2D, self).__init__(None, None, ignore_index,
            reduction='none')
        self.thresh = -math.log(thresh)
        self.n_min = n_min
        self.ignore_index = ignore_index

    def forward(self, pred, target):
        return self.OhemCELoss(pred, target)

    def OhemCELoss(self, logits, labels):
        loss = super(OhemCELoss2D, self).forward(logits, labels).view(-1)
        loss, _ = torch.sort(loss, descending=True)
        if loss[self.n_min] > self.thresh:
            loss = loss[loss > self.thresh]
        else:
            loss = loss[:self.n_min]
        return torch.mean(loss)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_min': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused_sort_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (64 * (r0 // 16) + r0 % 16), None) tmp2 = tl.load(in_ptr0 + (16 + 64 * (r0 // 16) + r0 % 16), None) tmp5 = tl.load(in_ptr0 + (32 + 64 * (r0 // 16) + r0 % 16), None) tmp8 = tl.load(in_ptr0 + (48 + 64 * (r0 // 16) + r0 % 16), None) tmp13 = tl.load(in_ptr1 + (64 * (r0 // 16) + r0 % 16), None) tmp16 = tl.load(in_ptr1 + (16 + 64 * (r0 // 16) + r0 % 16), None) tmp20 = tl.load(in_ptr1 + (32 + 64 * (r0 // 16) + r0 % 16), None) tmp24 = tl.load(in_ptr1 + (48 + 64 * (r0 // 16) + r0 % 16), None) tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp14 = tmp12 * tmp13 tmp15 = tmp2 - tmp11 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp19 = tmp5 - tmp11 tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp23 = tmp8 - tmp11 tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp27 = -tmp26 tmp28 = r0 tmp29 = tmp28.to(tl.int16) tmp30 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp31 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK]) tmp32, _tmp33 = triton_helpers.sort_with_index(tmp30, tmp31, None, 1, stable=False, descending=True) tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp32, None) @triton.jit def triton_poi_fused_gt_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + 4) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = 0.35667494393873245 tmp3 = tmp1 > tmp2 tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp3, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) 
        triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg1_1
        buf1 = empty_strided_cuda((64,), (1,), torch.float32)
        triton_per_fused_sort_1[grid(1)](buf0, arg0_1, buf1, 1, 64, XBLOCK=
            1, num_warps=2, num_stages=1)
        del arg0_1
        del buf0
        buf3 = empty_strided_cuda((), (), torch.bool)
        triton_poi_fused_gt_2[grid(1)](buf1, buf3, 1, XBLOCK=1, num_warps=1,
            num_stages=1)
    return buf1, buf3


class OhemCELoss2DNew(nn.CrossEntropyLoss):
    """2D Cross Entropy Loss with Auxiliary Loss"""

    def __init__(self, n_min, thresh=0.7, ignore_index=-1):
        super(OhemCELoss2DNew, self).__init__(None, None, ignore_index,
            reduction='none')
        self.thresh = -math.log(thresh)
        self.n_min = n_min
        self.ignore_index = ignore_index

    def OhemCELoss(self, logits, labels):
        loss = super(OhemCELoss2DNew, self).forward(logits, labels).view(-1)
        loss, _ = torch.sort(loss, descending=True)
        if loss[self.n_min] > self.thresh:
            loss = loss[loss > self.thresh]
        else:
            loss = loss[:self.n_min]
        return torch.mean(loss)

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
dani3l125/TDNet
OhemCELoss2D
false
15,115
[ "MIT" ]
195
3f8b5378fcc7f97c26b3760ddaf3d4402cf477d1
https://github.com/dani3l125/TDNet/tree/3f8b5378fcc7f97c26b3760ddaf3d4402cf477d1
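Note that the constant 0.35667494393873245 hard-coded in triton_poi_fused_gt_2 is just -log(0.7), the default thresh, baked in at trace time. The selection rule itself can be sketched in plain PyTorch on precomputed per-pixel losses (illustrative values):

import math
import torch

thresh = -math.log(0.7)  # 0.35667494393873245, matches the kernel constant
n_min = 4
loss = torch.rand(64)    # per-pixel cross-entropy losses, flattened

loss, _ = torch.sort(loss, descending=True)
if loss[n_min] > thresh:
    loss = loss[loss > thresh]  # keep every sufficiently hard pixel
else:
    loss = loss[:n_min]         # otherwise keep at least the n_min hardest
print(loss.mean())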
RewardCriterion
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/kb/ckbatnngzuvq3z7mgsfaxtjswlmq3oe4felkwzn6n2v3rjbtpadh.py # Topologically Sorted Source Nodes: [neg, mul], Original ATen: [aten.neg, aten.mul] # Source node to ATen node mapping: # mul => mul # neg => neg # Graph fragment: # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%view,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %view_1), kwargs = {}) triton_poi_fused_mul_neg_0 = async_compile.triton('triton_poi_fused_mul_neg_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_neg_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_neg_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp1 = 
-tmp0 tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/c3/cc3yny734hxamrlyucs2vfypyf64tc37jvydcozkqsfdgywoa6jx.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%full_default, %slice_2], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 1.0 tmp6 = tl.full(tmp5.shape, 0.0, tmp5.dtype) tmp7 = tl.where(tmp4, tmp5, tmp6) tmp8 = tmp0 >= tmp3 tmp9 = tl.full([1], 4, tl.int64) tmp10 = tmp0 < tmp9 tmp11 = tl.load(in_ptr0 + ((4*x1) + ((-1) + x0)), tmp8 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = 0.0 tmp13 = tmp11 > tmp12 tmp14 = tmp13.to(tl.float32) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp8, tmp14, tmp15) tmp17 = tl.where(tmp4, tmp7, tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [neg, mul], Original ATen: [aten.neg, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_neg_0.run(arg0_1, arg1_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(arg2_1, buf1, 16, grid=grid(16), 
stream=stream0) del arg2_1 return (buf0, reinterpret_tensor(buf1, (16, ), (1, ), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch.autograd import Variable from torch.autograd import * def to_contiguous(tensor): if tensor.is_contiguous(): return tensor else: return tensor.contiguous() class RewardCriterion(nn.Module): def __init__(self): super(RewardCriterion, self).__init__() def forward(self, input, seq, reward): input = to_contiguous(input).view(-1) reward = to_contiguous(reward).view(-1) mask = (seq > 0).float() mask = to_contiguous(torch.cat([mask.new(mask.size(0), 1).fill_(1), mask[:, :-1]], 1)).view(-1) output = -input * reward * Variable(mask) output = torch.sum(output) / torch.sum(mask) return output def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.autograd import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_neg_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp1 = -tmp0 tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 1.0 tmp6 = tl.full(tmp5.shape, 0.0, tmp5.dtype) tmp7 = tl.where(tmp4, tmp5, tmp6) tmp8 = tmp0 >= tmp3 tl.full([1], 4, tl.int64) tmp11 = tl.load(in_ptr0 + (4 * x1 + (-1 + x0)), tmp8 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = 0.0 tmp13 = tmp11 > tmp12 tmp14 = tmp13.to(tl.float32) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp8, tmp14, tmp15) tmp17 = tl.where(tmp4, tmp7, tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused_mul_neg_0[grid(16)](arg0_1, arg1_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_cat_1[grid(16)](arg2_1, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg2_1 return buf0, reinterpret_tensor(buf1, (16,), (1,), 0) def to_contiguous(tensor): if tensor.is_contiguous(): return tensor else: return tensor.contiguous() class RewardCriterionNew(nn.Module): def __init__(self): super(RewardCriterionNew, self).__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
daqingliu/CAVP
RewardCriterion
false
15,116
[ "MIT" ]
49
d383affde78dbc75e369095c27954dcdd79478d0
https://github.com/daqingliu/CAVP/tree/d383affde78dbc75e369095c27954dcdd79478d0
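The cat in RewardCriterion builds a validity mask that always counts the first timestep and otherwise follows seq shifted right by one, so the reward-weighted loss stops accumulating after the first padding token. A small worked example with assumed token ids (0 used as padding):

import torch

seq = torch.tensor([[5.0, 3.0, 0.0, 0.0],
                    [7.0, 0.0, 0.0, 0.0]])
mask = (seq > 0).float()
mask = torch.cat([mask.new_ones(mask.size(0), 1), mask[:, :-1]], 1)
print(mask)
# tensor([[1., 1., 1., 0.],
#         [1., 1., 0., 0.]])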
CircularPad
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/lq/clqc7cz2u4t6qtgcpxxrq3bmn42rzk332haxyukbgv7pwc5db4as.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.cat, aten.constant_pad_nd] # Source node to ATen node mapping: # x => cat # x_1 => constant_pad_nd # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_1, %arg0_1, %arg0_1], -2), kwargs = {}) # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%cat, [4, 4, 0, 0], 0.0), kwargs = {}) triton_poi_fused_cat_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_cat_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = 
xindex < xnumel x0 = xindex % 12 x1 = (xindex // 12) % 12 x2 = (xindex // 144) x4 = xindex tmp0 = (-4) + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp8 & tmp5 tmp10 = tl.load(in_ptr0 + ((-4) + x0 + (4*x1) + (16*x2)), tmp9 & xmask, other=0.0) tmp11 = tmp6 >= tmp3 tmp12 = tl.full([1], 8, tl.int64) tmp13 = tmp6 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tmp14 & tmp5 tmp16 = tl.load(in_ptr0 + ((-4) + x0 + (4*((-4) + x1)) + (16*x2)), tmp15 & xmask, other=0.0) tmp17 = tmp6 >= tmp12 tmp18 = tl.full([1], 12, tl.int64) tmp19 = tmp6 < tmp18 tmp20 = tmp17 & tmp5 tmp21 = tl.load(in_ptr0 + ((-4) + x0 + (4*((-8) + x1)) + (16*x2)), tmp20 & xmask, other=0.0) tmp22 = tl.where(tmp14, tmp16, tmp21) tmp23 = tl.where(tmp8, tmp10, tmp22) tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp5, tmp23, tmp24) tl.store(out_ptr0 + (x4), tmp25, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 12, 12), (576, 144, 12, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.cat, aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_cat_constant_pad_nd_0.run(arg0_1, buf0, 2304, grid=grid(2304), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class CircularPad(nn.Module):

    def __init__(self, pad):
        super(CircularPad, self).__init__()
        self.pad = pad
        self.zeropad = torch.nn.modules.padding.ConstantPad2d((pad, pad, 0, 0), 0)

    def forward(self, x):
        x = torch.cat([x[..., -self.pad:, :], x, x[..., :self.pad, :]], dim=-2)
        x = self.zeropad(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'pad': 4}]
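A small sanity check for the module above: with pad equal to the height of the input, every concatenated slice is a full copy of x, so all three 4-row bands of the output (between the zero-padded side columns) equal x. A minimal sketch, assuming the CircularPad class above is in scope:

import torch

x = torch.rand(4, 4, 4, 4)
y = CircularPad(4)(x)
assert y.shape == (4, 4, 12, 12)
assert torch.equal(y[..., :, :4], torch.zeros(4, 4, 12, 4))   # left zero pad
for band in (slice(0, 4), slice(4, 8), slice(8, 12)):         # circular copies
    assert torch.equal(y[..., band, 4:8], x)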
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 % 12 x2 = xindex // 144 x4 = xindex tmp0 = -4 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = x1 tmp8 = tmp6 < tmp3 tmp9 = tmp8 & tmp5 tmp10 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1 + 16 * x2), tmp9 & xmask, other=0.0) tmp11 = tmp6 >= tmp3 tmp12 = tl.full([1], 8, tl.int64) tmp13 = tmp6 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tmp14 & tmp5 tmp16 = tl.load(in_ptr0 + (-4 + x0 + 4 * (-4 + x1) + 16 * x2), tmp15 & xmask, other=0.0) tmp17 = tmp6 >= tmp12 tl.full([1], 12, tl.int64) tmp20 = tmp17 & tmp5 tmp21 = tl.load(in_ptr0 + (-4 + x0 + 4 * (-8 + x1) + 16 * x2), tmp20 & xmask, other=0.0) tmp22 = tl.where(tmp14, tmp16, tmp21) tmp23 = tl.where(tmp8, tmp10, tmp22) tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp5, tmp23, tmp24) tl.store(out_ptr0 + x4, tmp25, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 12, 12), (576, 144, 12, 1), torch. float32) get_raw_stream(0) triton_poi_fused_cat_constant_pad_nd_0[grid(2304)](arg0_1, buf0, 2304, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class CircularPadNew(nn.Module): def __init__(self, pad): super(CircularPadNew, self).__init__() self.pad = pad self.zeropad = torch.nn.modules.padding.ConstantPad2d((pad, pad, 0, 0), 0) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
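A smoke test comparing the rewritten module against the eager one. This is a sketch under stated assumptions: it needs a CUDA device with Triton available, both classes above in scope, and exactly the traced (4, 4, 4, 4) contiguous input, since the assert_size_stride guard in call() rejects anything else.

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    # Both paths only copy values and write zeros, so equality is exact.
    assert torch.equal(CircularPad(4)(x), CircularPadNew(4)(x))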
daniilidis-group/emvn
CircularPad
false
15,117
[ "MIT" ]
46
1888e2a47b02e911e08afa40ba7341662cf3d6ea
https://github.com/daniilidis-group/emvn/tree/1888e2a47b02e911e08afa40ba7341662cf3d6ea
classifier
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/b4/cb4fo3fg3yi4znxfzckfwt26im4zzcp32xksxtd5omdmmelj6dh4.py # Topologically Sorted Source Nodes: [conv3d, out], Original ATen: [aten.convolution, aten._native_batch_norm_legit] # Source node to ATen node mapping: # conv3d => convolution # out => var_mean # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [2, 2, 2], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3, 4]), kwargs = {correction: 0, keepdim: True}) triton_red_fused__native_batch_norm_legit_convolution_0 = async_compile.triton('triton_red_fused__native_batch_norm_legit_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[64, 8192], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused__native_batch_norm_legit_convolution_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 64 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x4 = xindex x1 = (xindex // 4) % 4 tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r3 = rindex tmp0 = tl.load(in_out_ptr0 + (r3 + (8192*x4)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = triton_helpers.welford_reduce( tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0 ) tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean) tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2) tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight) tl.store(in_out_ptr0 + (r3 + (8192*x4)), tmp2, rmask & xmask) tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford( tmp4_mean, tmp4_m2, tmp4_weight, 1 ) tmp4 = tmp4_tmp[:, None] tmp5 = tmp5_tmp[:, None] tmp6 = tmp6_tmp[:, None] tl.store(out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr1 + (x4), tmp5, xmask) tl.store(out_ptr2 + (x4), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ub/cubezj6ijqtr6ozmlpbhsfaux4dgueu26axynpkoe4kuzjj2wavu.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten._native_batch_norm_legit] # Source node to ATen node mapping: # out => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3, 4]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_per_fused__native_batch_norm_legit_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (4*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + (4*x0)), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + (4*x0)), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp15 = tmp12[:, None] tmp16 = 32768.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(out_ptr2 + (x0), tmp20, xmask) tl.store(out_ptr0 + (x0), tmp13, xmask) tl.store(out_ptr1 + (x0), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/go/cgoqxhuzitj5jeg2dtqpufqhfelbl6ohzah65o373nozob7i6sl4.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # out_1 => gt, mul_1, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.01), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul_1), kwargs = {}) triton_poi_fused_leaky_relu_2 = async_compile.triton('triton_poi_fused_leaky_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x1 = (xindex // 32768) tmp0 = tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 32768.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp10 = 0.0 tmp11 = tmp9 > tmp10 tmp12 = 0.01 tmp13 = tmp9 * tmp12 tmp14 = tl.where(tmp11, tmp9, tmp13) tl.store(out_ptr0 + (x2), tmp14, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/2l/c2lcjnttfk6wsjvyhufzijotjwe4fq7tgruqhmrtzma5w7h4wid7.py # Topologically Sorted Source Nodes: [conv3d_1, out_2, out_3], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu] # Source node to ATen node mapping: # conv3d_1 => convolution_1 # out_2 => add_1, rsqrt_1, var_mean_1 # out_3 => gt_1, mul_3, where_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%view_3, %primals_4, %primals_5, [2, 2, 2], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_5, [0, 2, 3, 4]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_6, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_6, 0.01), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %view_6, %mul_3), kwargs = {}) triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_3 = async_compile.triton('triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[32, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_3(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 32 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x0 = xindex % 8 tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_out_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = triton_helpers.welford_reduce( tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0 ) tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean) tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2) tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight) tl.store(in_out_ptr0 + (r2 + (4096*x3)), tmp2, rmask & xmask) tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford( tmp4_mean, tmp4_m2, tmp4_weight, 1 ) tmp4 = tmp4_tmp[:, None] tmp5 = tmp5_tmp[:, None] tmp6 = tmp6_tmp[:, None] tl.store(out_ptr0 + (x3), tmp4, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp7 = tl.load(in_out_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp8 = tmp7 - tmp4 tmp9 = 4096.0 tmp10 = tmp5 / tmp9 tmp11 = 1e-05 tmp12 = tmp10 + tmp11 tmp13 = libdevice.rsqrt(tmp12) tmp14 = tmp8 * tmp13 tmp15 = 0.0 tmp16 = tmp14 > tmp15 tmp17 = 0.01 tmp18 = tmp14 * tmp17 tmp19 = tl.where(tmp16, tmp14, tmp18) tl.store(out_ptr2 + (r2 + (4096*x3)), tmp19, rmask & xmask) tmp20 = 4096.0 tmp21 = tmp5 / tmp20 tmp22 = 1e-05 tmp23 = tmp21 + tmp22 tmp24 = libdevice.rsqrt(tmp23) tl.store(out_ptr3 + (x3), tmp24, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/av/cav453nyafmoyhcksqp6t7yrtdsdrh673y5mzww4mdavbkgm5jf2.py # Topologically Sorted Source Nodes: [conv3d_2, out_4, out_5], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu] # Source node to ATen node mapping: # conv3d_2 => convolution_2 # out_4 => add_2, rsqrt_2, var_mean_2 # out_5 => gt_2, mul_5, where_2 # Graph fragment: # %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%view_8, %primals_6, %primals_7, [2, 2, 2], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_10, [0, 2, 3, 4]), kwargs = {correction: 0, keepdim: True}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {}) # %rsqrt_2 : [num_users=2] = 
call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_11, 0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 0.01), kwargs = {}) # %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %view_11, %mul_5), kwargs = {}) triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_4 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 512], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_4(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel): xnumel = 64 XBLOCK: tl.constexpr = 1 rnumel = 512 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + (r2 + (512*x3)), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 512, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = tmp2 - tmp10 tmp17 = 512.0 tmp18 = tmp15 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp22 = tmp16 * tmp21 tmp23 = 0.0 tmp24 = tmp22 > tmp23 tmp25 = 0.01 tmp26 = tmp22 * tmp25 tmp27 = tl.where(tmp24, tmp22, tmp26) tl.store(in_out_ptr0 + (r2 + (512*x3)), tmp2, None) tl.store(out_ptr2 + (r2 + (512*x3)), tmp27, None) tl.store(out_ptr3 + (x3), tmp21, None) tl.store(out_ptr0 + (x3), tmp10, None) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/ae/caeiddrnkhkarckypkltdh6ew7zan3h5gsdbeslz2xzqvnolzpxy.py # Topologically Sorted Source Nodes: [conv3d_3, out_6, out_7], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu] # Source node to ATen node mapping: # conv3d_3 => convolution_3 # out_6 => add_3, rsqrt_3, var_mean_3 # out_7 => gt_3, mul_7, where_3 # Graph fragment: # %convolution_3 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%view_13, %primals_8, %primals_9, [2, 2, 2], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %var_mean_3 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_15, [0, 2, 3, 4]), kwargs = {correction: 0, keepdim: True}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {}) # %rsqrt_3 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_3,), kwargs = {}) # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_16, 0), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_16, 0.01), kwargs = {}) # %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %view_16, %mul_7), kwargs = {}) triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_5 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[128, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_5(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 128 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (r2 + (64*x3)), xmask, other=0.0) tmp1 = 
tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 64, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = tmp2 - tmp12 tmp20 = 64.0 tmp21 = tmp18 / tmp20 tmp22 = 1e-05 tmp23 = tmp21 + tmp22 tmp24 = libdevice.rsqrt(tmp23) tmp25 = tmp19 * tmp24 tmp26 = 0.0 tmp27 = tmp25 > tmp26 tmp28 = 0.01 tmp29 = tmp25 * tmp28 tmp30 = tl.where(tmp27, tmp25, tmp29) tl.store(in_out_ptr0 + (r2 + (64*x3)), tmp2, xmask) tl.store(out_ptr2 + (r2 + (64*x3)), tmp30, xmask) tl.store(out_ptr3 + (x3), tmp24, xmask) tl.store(out_ptr0 + (x3), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/yq/cyq2w4xi5vyuapvrbnkbgbxazossv3zrac7wiy3phtirxi2fb36k.py # Topologically Sorted Source Nodes: [out_8, z, out_9], Original ATen: [aten.convolution, aten.mean, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # out_8 => convolution_4 # out_9 => gt_4, mul_8, where_4 # z => mean # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_18, %primals_10, %primals_11, [2, 2, 2], [1, 1, 1], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%convolution_4, [-1, -2, -3], True), kwargs = {}) # %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_20, 0), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_20, 0.01), kwargs = {}) # %where_4 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %view_20, %mul_8), kwargs = {}) # %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_28, 0), kwargs = {}) triton_per_fused_convolution_leaky_relu_leaky_relu_backward_mean_6 = async_compile.triton('triton_per_fused_convolution_leaky_relu_leaky_relu_backward_mean_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 8], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_convolution_leaky_relu_leaky_relu_backward_mean_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_convolution_leaky_relu_leaky_relu_backward_mean_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 8 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r2 + (8*x3)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = 8.0 tmp8 = tmp6 / tmp7 tmp9 = 0.0 tmp10 = tmp8 > tmp9 tmp11 = 0.01 tmp12 = tmp8 * tmp11 tmp13 = tl.where(tmp10, tmp8, tmp12) tmp14 = tmp13 > tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + (x3), tmp13, xmask) tl.store(out_ptr0 + (x3), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args args.clear() assert_size_stride(primals_1, (4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1)) assert_size_stride(primals_2, (4, 1, 4, 4, 4), (64, 64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (8, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_5, (8, ), (1, )) assert_size_stride(primals_6, (16, 8, 4, 4, 4), (512, 64, 16, 4, 1)) assert_size_stride(primals_7, (16, ), (1, )) assert_size_stride(primals_8, (32, 16, 4, 4, 4), (1024, 64, 16, 4, 1)) assert_size_stride(primals_9, (32, ), (1, )) assert_size_stride(primals_10, (4, 32, 4, 4, 4), (2048, 64, 16, 4, 1)) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, 4), (4, 1)) assert_size_stride(primals_13, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv3d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 32, 32, 32), (131072, 32768, 1024, 32, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((1, 16, 1, 1, 1, 4), (64, 4, 64, 64, 64, 1), torch.float32) buf3 = empty_strided_cuda((1, 16, 1, 1, 1, 4), (64, 4, 64, 64, 64, 1), torch.float32) buf4 = empty_strided_cuda((1, 16, 1, 1, 1, 4), (64, 4, 64, 64, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [conv3d, out], Original ATen: [aten.convolution, aten._native_batch_norm_legit] stream0 = get_raw_stream(0) triton_red_fused__native_batch_norm_legit_convolution_0.run(buf1, primals_3, buf2, buf3, buf4, 64, 8192, grid=grid(64), stream=stream0) del primals_3 buf5 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 16, 16, 16), torch.float32) buf6 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 16, 16, 16), torch.float32) buf8 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 16, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten._native_batch_norm_legit] 
triton_per_fused__native_batch_norm_legit_1.run(buf2, buf3, buf4, buf5, buf6, buf8, 16, 4, grid=grid(16), stream=stream0) del buf2 buf9 = empty_strided_cuda((4, 4, 32, 32, 32), (131072, 32768, 1024, 32, 1), torch.float32) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_2.run(buf1, buf5, buf6, buf9, 524288, grid=grid(524288), stream=stream0) # Topologically Sorted Source Nodes: [conv3d_1], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_4, stride=(2, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 8, 16, 16, 16), (32768, 4096, 256, 16, 1)) buf11 = buf10; del buf10 # reuse buf12 = empty_strided_cuda((1, 32, 1, 1, 1), (32, 1, 32, 32, 32), torch.float32) buf16 = empty_strided_cuda((4, 8, 16, 16, 16), (32768, 4096, 256, 16, 1), torch.float32) buf15 = empty_strided_cuda((1, 32, 1, 1, 1), (32, 1, 32, 32, 32), torch.float32) # Topologically Sorted Source Nodes: [conv3d_1, out_2, out_3], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu] triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_3.run(buf11, primals_5, buf12, buf16, buf15, 32, 4096, grid=grid(32), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv3d_2], Original ATen: [aten.convolution] buf17 = extern_kernels.convolution(buf16, primals_6, stride=(2, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 16, 8, 8, 8), (8192, 512, 64, 8, 1)) buf18 = buf17; del buf17 # reuse buf19 = reinterpret_tensor(buf4, (1, 64, 1, 1, 1), (64, 1, 64, 64, 64), 0); del buf4 # reuse buf23 = empty_strided_cuda((4, 16, 8, 8, 8), (8192, 512, 64, 8, 1), torch.float32) buf22 = reinterpret_tensor(buf3, (1, 64, 1, 1, 1), (64, 1, 64, 64, 64), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [conv3d_2, out_4, out_5], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu] triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_4.run(buf18, primals_7, buf19, buf23, buf22, 64, 512, grid=grid(64), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv3d_3], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf23, primals_8, stride=(2, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 32, 4, 4, 4), (2048, 64, 16, 4, 1)) buf25 = buf24; del buf24 # reuse buf26 = empty_strided_cuda((1, 128, 1, 1, 1), (128, 1, 128, 128, 128), torch.float32) buf30 = empty_strided_cuda((4, 32, 4, 4, 4), (2048, 64, 16, 4, 1), torch.float32) buf29 = empty_strided_cuda((1, 128, 1, 1, 1), (128, 1, 128, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [conv3d_3, out_6, out_7], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.leaky_relu] triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_5.run(buf25, primals_9, buf26, buf30, buf29, 128, 64, grid=grid(128), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.convolution] buf31 = extern_kernels.convolution(buf30, primals_10, stride=(2, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf31, (4, 4, 2, 2, 2), (32, 8, 4, 2, 1)) buf32 = reinterpret_tensor(buf6, (4, 4, 1, 1, 1), (4, 
1, 16, 16, 16), 0); del buf6 # reuse buf33 = reinterpret_tensor(buf32, (4, 4), (4, 1), 0); del buf32 # reuse buf35 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [out_8, z, out_9], Original ATen: [aten.convolution, aten.mean, aten.leaky_relu, aten.leaky_relu_backward] triton_per_fused_convolution_leaky_relu_leaky_relu_backward_mean_6.run(buf33, buf31, primals_11, buf35, 16, 8, grid=grid(16), stream=stream0) del buf31 del primals_11 buf34 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_10], Original ATen: [aten.addmm] extern_kernels.addmm(primals_13, buf33, reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf34) del primals_13 return (buf34, buf33, primals_1, primals_2, primals_4, primals_6, primals_8, primals_10, buf1, reinterpret_tensor(buf8, (16, ), (1, ), 0), buf9, buf11, reinterpret_tensor(buf15, (32, ), (1, ), 0), buf16, buf18, reinterpret_tensor(buf22, (64, ), (1, ), 0), buf23, buf25, reinterpret_tensor(buf29, (128, ), (1, ), 0), buf30, buf33, primals_12, buf35, reinterpret_tensor(buf26, (1, 128, 1, 1, 1), (128, 1, 1, 1, 1), 0), reinterpret_tensor(buf19, (1, 64, 1, 1, 1), (64, 1, 1, 1, 1), 0), reinterpret_tensor(buf12, (1, 32, 1, 1, 1), (32, 1, 1, 1, 1), 0), reinterpret_tensor(buf5, (1, 16, 1, 1, 1), (16, 1, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 4, 4, 4), (64, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((8, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((16, 8, 4, 4, 4), (512, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((32, 16, 4, 4, 4), (1024, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 32, 4, 4, 4), (2048, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
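The reduction kernels above fuse the InstanceNorm3d statistics into the convolution epilogue: triton_helpers.welford_reduce accumulates a running mean and M2 per lane, and triton_helpers.welford merges the lane partials, so mean and variance come out of a single pass over the activations (kernel 3 still loops a second time, but only to apply the normalization). For orientation, the scalar form of that recurrence, matching the correction=0 population variance in the graph:

def welford(values):
    # One-pass mean and M2 (sum of squared deviations), Welford's update.
    mean, m2, count = 0.0, 0.0, 0
    for v in values:
        count += 1
        delta = v - mean
        mean += delta / count
        m2 += delta * (v - mean)
    return mean, m2 / count   # population variance, as with correction=0

assert welford([1.0, 2.0, 3.0, 4.0]) == (2.5, 1.25)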
import torch
import torch.nn as nn
import torch.nn.functional as F


class classifier(nn.Module):

    def __init__(self, ef_dim, z_dim, class_num, voxel_size):
        super(classifier, self).__init__()
        self.ef_dim = ef_dim
        self.z_dim = z_dim
        self.class_num = class_num
        self.voxel_size = voxel_size
        self.conv_1 = nn.Conv3d(1, self.ef_dim, 4, stride=2, padding=1, bias=True)
        self.bn_1 = nn.InstanceNorm3d(self.ef_dim)
        self.conv_2 = nn.Conv3d(self.ef_dim, self.ef_dim * 2, 4, stride=2, padding=1, bias=True)
        self.bn_2 = nn.InstanceNorm3d(self.ef_dim * 2)
        self.conv_3 = nn.Conv3d(self.ef_dim * 2, self.ef_dim * 4, 4, stride=2, padding=1, bias=True)
        self.bn_3 = nn.InstanceNorm3d(self.ef_dim * 4)
        self.conv_4 = nn.Conv3d(self.ef_dim * 4, self.ef_dim * 8, 4, stride=2, padding=1, bias=True)
        self.bn_4 = nn.InstanceNorm3d(self.ef_dim * 8)
        self.conv_5 = nn.Conv3d(self.ef_dim * 8, self.z_dim, 4, stride=2, padding=1, bias=True)
        if self.voxel_size == 256:
            self.bn_5 = nn.InstanceNorm3d(self.z_dim)
            self.conv_5_2 = nn.Conv3d(self.z_dim, self.z_dim, 4, stride=2, padding=1, bias=True)
        self.linear1 = nn.Linear(self.z_dim, self.class_num, bias=True)

    def forward(self, inputs, is_training=False):
        out = inputs
        out = self.bn_1(self.conv_1(out))
        out = F.leaky_relu(out, negative_slope=0.01, inplace=True)
        out = self.bn_2(self.conv_2(out))
        out = F.leaky_relu(out, negative_slope=0.01, inplace=True)
        out = self.bn_3(self.conv_3(out))
        out = F.leaky_relu(out, negative_slope=0.01, inplace=True)
        out = self.bn_4(self.conv_4(out))
        out = F.leaky_relu(out, negative_slope=0.01, inplace=True)
        out = self.conv_5(out)
        if self.voxel_size == 256:
            out = self.bn_5(out)
            out = F.leaky_relu(out, negative_slope=0.01, inplace=True)
            out = self.conv_5_2(out)
        z = F.adaptive_avg_pool3d(out, output_size=(1, 1, 1))
        z = z.view(-1, self.z_dim)
        out = F.leaky_relu(z, negative_slope=0.01, inplace=True)
        out = self.linear1(out)
        return out, z


def get_inputs():
    return [torch.rand([4, 1, 64, 64, 64])]


def get_init_inputs():
    return [[], {'ef_dim': 4, 'z_dim': 4, 'class_num': 4, 'voxel_size': 4}]
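Each 4x4x4 convolution above uses stride 2 and padding 1, so the spatial size maps D -> floor((D + 2 - 4) / 2) + 1 = D / 2 for even D; the 64-voxel input therefore shrinks 64 -> 32 -> 16 -> 8 -> 4 -> 2 across the five convolutions before the global average pool. A quick shape check (voxel_size=4 here, so the 256-only branch is skipped):

import torch

m = classifier(ef_dim=4, z_dim=4, class_num=4, voxel_size=4)
out, z = m(torch.rand(4, 1, 64, 64, 64))
assert z.shape == (4, 4)     # pooled z_dim features per sample
assert out.shape == (4, 4)   # class_num logits per sample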
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_red_fused__native_batch_norm_legit_convolution_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl. constexpr, RBLOCK: tl.constexpr): xnumel = 64 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x4 = xindex x1 = xindex // 4 % 4 tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r3 = rindex tmp0 = tl.load(in_out_ptr0 + (r3 + 8192 * x4), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers. welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0) ) tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean) tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2) tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight) tl.store(in_out_ptr0 + (r3 + 8192 * x4), tmp2, rmask & xmask) tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean, tmp4_m2, tmp4_weight, 1) tmp4 = tmp4_tmp[:, None] tmp5 = tmp5_tmp[:, None] tmp6 = tmp6_tmp[:, None] tl.store(out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr1 + x4, tmp5, xmask) tl.store(out_ptr2 + x4, tmp6, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + 4 * x0), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp12[:, None] tmp16 = 32768.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(out_ptr2 + x0, tmp20, xmask) tl.store(out_ptr0 + x0, tmp13, xmask) tl.store(out_ptr1 + x0, tmp14, xmask) @triton.jit def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x1 = xindex // 32768 tmp0 = 
tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 32768.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp10 = 0.0 tmp11 = tmp9 > tmp10 tmp12 = 0.01 tmp13 = tmp9 * tmp12 tmp14 = tl.where(tmp11, tmp9, tmp13) tl.store(out_ptr0 + x2, tmp14, None) @triton.jit def triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_3( in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 32 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x0 = xindex % 8 tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_out_ptr0 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers. welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0) ) tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean) tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2) tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight) tl.store(in_out_ptr0 + (r2 + 4096 * x3), tmp2, rmask & xmask) tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean, tmp4_m2, tmp4_weight, 1) tmp4 = tmp4_tmp[:, None] tmp5 = tmp5_tmp[:, None] tmp6_tmp[:, None] tl.store(out_ptr0 + x3, tmp4, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp7 = tl.load(in_out_ptr0 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp8 = tmp7 - tmp4 tmp9 = 4096.0 tmp10 = tmp5 / tmp9 tmp11 = 1e-05 tmp12 = tmp10 + tmp11 tmp13 = libdevice.rsqrt(tmp12) tmp14 = tmp8 * tmp13 tmp15 = 0.0 tmp16 = tmp14 > tmp15 tmp17 = 0.01 tmp18 = tmp14 * tmp17 tmp19 = tl.where(tmp16, tmp14, tmp18) tl.store(out_ptr2 + (r2 + 4096 * x3), tmp19, rmask & xmask) tmp20 = 4096.0 tmp21 = tmp5 / tmp20 tmp22 = 1e-05 tmp23 = tmp21 + tmp22 tmp24 = libdevice.rsqrt(tmp23) tl.store(out_ptr3 + x3, tmp24, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_4( in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + (r2 + 512 * x3), None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 512, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = tmp2 - tmp10 tmp17 = 512.0 tmp18 = tmp15 / tmp17 tmp19 = 1e-05 tmp20 = 
tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp22 = tmp16 * tmp21 tmp23 = 0.0 tmp24 = tmp22 > tmp23 tmp25 = 0.01 tmp26 = tmp22 * tmp25 tmp27 = tl.where(tmp24, tmp22, tmp26) tl.store(in_out_ptr0 + (r2 + 512 * x3), tmp2, None) tl.store(out_ptr2 + (r2 + 512 * x3), tmp27, None) tl.store(out_ptr3 + x3, tmp21, None) tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_5( in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 128 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (r2 + 64 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 64, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = tmp2 - tmp12 tmp20 = 64.0 tmp21 = tmp18 / tmp20 tmp22 = 1e-05 tmp23 = tmp21 + tmp22 tmp24 = libdevice.rsqrt(tmp23) tmp25 = tmp19 * tmp24 tmp26 = 0.0 tmp27 = tmp25 > tmp26 tmp28 = 0.01 tmp29 = tmp25 * tmp28 tmp30 = tl.where(tmp27, tmp25, tmp29) tl.store(in_out_ptr0 + (r2 + 64 * x3), tmp2, xmask) tl.store(out_ptr2 + (r2 + 64 * x3), tmp30, xmask) tl.store(out_ptr3 + x3, tmp24, xmask) tl.store(out_ptr0 + x3, tmp12, xmask) @triton.jit def triton_per_fused_convolution_leaky_relu_leaky_relu_backward_mean_6( in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl. 
constexpr): xnumel = 16 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r2 + 8 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = 8.0 tmp8 = tmp6 / tmp7 tmp9 = 0.0 tmp10 = tmp8 > tmp9 tmp11 = 0.01 tmp12 = tmp8 * tmp11 tmp13 = tl.where(tmp10, tmp8, tmp12) tmp14 = tmp13 > tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + x3, tmp13, xmask) tl.store(out_ptr0 + x3, tmp14, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1)) assert_size_stride(primals_2, (4, 1, 4, 4, 4), (64, 64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (8, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (16, 8, 4, 4, 4), (512, 64, 16, 4, 1)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (32, 16, 4, 4, 4), (1024, 64, 16, 4, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (4, 32, 4, 4, 4), (2048, 64, 16, 4, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 4), (4, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 32, 32, 32), (131072, 32768, 1024, 32, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((1, 16, 1, 1, 1, 4), (64, 4, 64, 64, 64, 1), torch.float32) buf3 = empty_strided_cuda((1, 16, 1, 1, 1, 4), (64, 4, 64, 64, 64, 1), torch.float32) buf4 = empty_strided_cuda((1, 16, 1, 1, 1, 4), (64, 4, 64, 64, 64, 1), torch.float32) get_raw_stream(0) triton_red_fused__native_batch_norm_legit_convolution_0[grid(64)](buf1, primals_3, buf2, buf3, buf4, 64, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) del primals_3 buf5 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 16, 16, 16), torch.float32) buf6 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 16, 16, 16), torch.float32) buf8 = empty_strided_cuda((1, 16, 1, 1, 1), (16, 1, 16, 16, 16), torch.float32) triton_per_fused__native_batch_norm_legit_1[grid(16)](buf2, buf3, buf4, buf5, buf6, buf8, 16, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf2 buf9 = empty_strided_cuda((4, 4, 32, 32, 32), (131072, 32768, 1024, 32, 1), torch.float32) triton_poi_fused_leaky_relu_2[grid(524288)](buf1, buf5, buf6, buf9, 524288, XBLOCK=512, num_warps=8, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_4, stride=(2, 2, 2 ), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 8, 16, 16, 16), (32768, 4096, 256, 16, 1) ) buf11 = buf10 del buf10 buf12 = empty_strided_cuda((1, 32, 1, 1, 1), (32, 1, 32, 32, 32), torch.float32) buf16 = empty_strided_cuda((4, 8, 16, 16, 16), (32768, 4096, 256, 16, 1), torch.float32) buf15 = empty_strided_cuda((1, 32, 1, 1, 
1), (32, 1, 32, 32, 32), torch.float32) triton_red_fused__native_batch_norm_legit_convolution_leaky_relu_3[grid (32)](buf11, primals_5, buf12, buf16, buf15, 32, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) del primals_5 buf17 = extern_kernels.convolution(buf16, primals_6, stride=(2, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 16, 8, 8, 8), (8192, 512, 64, 8, 1)) buf18 = buf17 del buf17 buf19 = reinterpret_tensor(buf4, (1, 64, 1, 1, 1), (64, 1, 64, 64, 64), 0) del buf4 buf23 = empty_strided_cuda((4, 16, 8, 8, 8), (8192, 512, 64, 8, 1), torch.float32) buf22 = reinterpret_tensor(buf3, (1, 64, 1, 1, 1), (64, 1, 64, 64, 64), 0) del buf3 triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_4[grid (64)](buf18, primals_7, buf19, buf23, buf22, 64, 512, num_warps =4, num_stages=1) del primals_7 buf24 = extern_kernels.convolution(buf23, primals_8, stride=(2, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 32, 4, 4, 4), (2048, 64, 16, 4, 1)) buf25 = buf24 del buf24 buf26 = empty_strided_cuda((1, 128, 1, 1, 1), (128, 1, 128, 128, 128), torch.float32) buf30 = empty_strided_cuda((4, 32, 4, 4, 4), (2048, 64, 16, 4, 1), torch.float32) buf29 = empty_strided_cuda((1, 128, 1, 1, 1), (128, 1, 128, 128, 128), torch.float32) triton_per_fused__native_batch_norm_legit_convolution_leaky_relu_5[grid (128)](buf25, primals_9, buf26, buf30, buf29, 128, 64, XBLOCK=8, num_warps=4, num_stages=1) del primals_9 buf31 = extern_kernels.convolution(buf30, primals_10, stride=(2, 2, 2), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf31, (4, 4, 2, 2, 2), (32, 8, 4, 2, 1)) buf32 = reinterpret_tensor(buf6, (4, 4, 1, 1, 1), (4, 1, 16, 16, 16), 0 ) del buf6 buf33 = reinterpret_tensor(buf32, (4, 4), (4, 1), 0) del buf32 buf35 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_per_fused_convolution_leaky_relu_leaky_relu_backward_mean_6[grid (16)](buf33, buf31, primals_11, buf35, 16, 8, XBLOCK=1, num_warps=2, num_stages=1) del buf31 del primals_11 buf34 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_13, buf33, reinterpret_tensor( primals_12, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf34) del primals_13 return (buf34, buf33, primals_1, primals_2, primals_4, primals_6, primals_8, primals_10, buf1, reinterpret_tensor(buf8, (16,), (1,), 0), buf9, buf11, reinterpret_tensor(buf15, (32,), (1,), 0), buf16, buf18, reinterpret_tensor(buf22, (64,), (1,), 0), buf23, buf25, reinterpret_tensor(buf29, (128,), (1,), 0), buf30, buf33, primals_12, buf35, reinterpret_tensor(buf26, (1, 128, 1, 1, 1), ( 128, 1, 1, 1, 1), 0), reinterpret_tensor(buf19, (1, 64, 1, 1, 1), ( 64, 1, 1, 1, 1), 0), reinterpret_tensor(buf12, (1, 32, 1, 1, 1), ( 32, 1, 1, 1, 1), 0), reinterpret_tensor(buf5, (1, 16, 1, 1, 1), (16, 1, 1, 1, 1), 0)) class classifierNew(nn.Module): def __init__(self, ef_dim, z_dim, class_num, voxel_size): super(classifierNew, self).__init__() self.ef_dim = ef_dim self.z_dim = z_dim self.class_num = class_num self.voxel_size = voxel_size self.conv_1 = nn.Conv3d(1, self.ef_dim, 4, stride=2, padding=1, bias=True) self.bn_1 = nn.InstanceNorm3d(self.ef_dim) self.conv_2 = nn.Conv3d(self.ef_dim, self.ef_dim * 2, 4, stride=2, padding=1, bias=True) self.bn_2 = nn.InstanceNorm3d(self.ef_dim * 2) self.conv_3 = nn.Conv3d(self.ef_dim * 2, 
self.ef_dim * 4, 4, stride =2, padding=1, bias=True) self.bn_3 = nn.InstanceNorm3d(self.ef_dim * 4) self.conv_4 = nn.Conv3d(self.ef_dim * 4, self.ef_dim * 8, 4, stride =2, padding=1, bias=True) self.bn_4 = nn.InstanceNorm3d(self.ef_dim * 8) self.conv_5 = nn.Conv3d(self.ef_dim * 8, self.z_dim, 4, stride=2, padding=1, bias=True) if self.voxel_size == 256: self.bn_5 = nn.InstanceNorm3d(self.z_dim) self.conv_5_2 = nn.Conv3d(self.z_dim, self.z_dim, 4, stride=2, padding=1, bias=True) self.linear1 = nn.Linear(self.z_dim, self.class_num, bias=True) def forward(self, input_0): primals_2 = self.conv_1.weight primals_3 = self.conv_1.bias primals_4 = self.conv_2.weight primals_5 = self.conv_2.bias primals_6 = self.conv_3.weight primals_7 = self.conv_3.bias primals_8 = self.conv_4.weight primals_9 = self.conv_4.bias primals_10 = self.conv_5.weight primals_11 = self.conv_5.bias primals_12 = self.linear1.weight primals_13 = self.linear1.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0], output[1]
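Note: the graph executed by call corresponds to a plausible eager-mode forward for the classifierNew module above: four conv -> InstanceNorm3d -> LeakyReLU stages (the 0.01 slope appears as a constant in the fused kernels), a fifth convolution to z_dim, a mean over the remaining 2x2x2 grid fused with a final LeakyReLU (the triton_per_fused_convolution_leaky_relu_leaky_relu_backward_mean_6 kernel), and the linear head. A hedged reconstruction, ignoring the voxel_size == 256 branch that this trace does not exercise; the original DECOR-GAN forward may differ in detail:

import torch.nn.functional as F

def classifier_forward(self, x):
    # Stages 1-4: conv -> InstanceNorm3d -> LeakyReLU(0.01); each 4^3 conv
    # with stride 2 halves the grid (64^3 -> 32^3 -> 16^3 -> 8^3 -> 4^3).
    x = F.leaky_relu(self.bn_1(self.conv_1(x)), negative_slope=0.01)
    x = F.leaky_relu(self.bn_2(self.conv_2(x)), negative_slope=0.01)
    x = F.leaky_relu(self.bn_3(self.conv_3(x)), negative_slope=0.01)
    x = F.leaky_relu(self.bn_4(self.conv_4(x)), negative_slope=0.01)
    # Stage 5 maps to z_dim on a 2^3 grid; the mean over its 8 voxels and the
    # last LeakyReLU are what the final fused reduction kernel computes.
    x = self.conv_5(x)
    z = F.leaky_relu(x.view(x.size(0), self.z_dim, -1).mean(-1),
                     negative_slope=0.01)
    return self.linear1(z), z   # matches call(...) returning (buf34, buf33)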
czq142857/DECOR-GAN
classifier
false
15118
[ "MIT" ]
55
79c80fc202b8af982989a3e3bb3afe85e606b71f
https://github.com/czq142857/DECOR-GAN/tree/79c80fc202b8af982989a3e3bb3afe85e606b71f
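Before the next record, one detail of the classifier graph above is worth isolating: its epilogue kernel (triton_per_fused_convolution_leaky_relu_leaky_relu_backward_mean_6) fuses the conv_5 bias add, the mean over 8 spatial positions, the LeakyReLU, and the sign mask reused by the backward pass into a single persistent reduction. A minimal standalone sketch of that pattern, with hypothetical shapes and names (not the generated kernel itself):

import torch
import triton
import triton.language as tl

@triton.jit
def bias_mean_leaky_relu_kernel(x_ptr, bias_ptr, out_ptr, mask_ptr,
                                C: tl.constexpr, S: tl.constexpr,
                                BLOCK: tl.constexpr):
    # One program per (batch, channel) row; the S spatial values are reduced.
    row = tl.program_id(0)
    offs = tl.arange(0, BLOCK)
    valid = offs < S
    x = tl.load(x_ptr + row * S + offs, mask=valid, other=0.0)
    b = tl.load(bias_ptr + row % C)            # per-channel conv bias
    x = tl.where(valid, x + b, 0.0)            # bias add on live lanes only
    mean = tl.sum(x, axis=0) / S               # global average pool
    pos = mean > 0.0
    y = tl.where(pos, mean, 0.01 * mean)       # LeakyReLU, slope 0.01
    tl.store(out_ptr + row, y)
    tl.store(mask_ptr + row, pos.to(tl.int8))  # kept for the backward pass

B, C, S = 4, 4, 8                              # shapes from the record above
x = torch.randn(B * C, S, device='cuda')       # conv_5 output rows, flattened
bias = torch.randn(C, device='cuda')
out = torch.empty(B * C, device='cuda')
sign = torch.empty(B * C, dtype=torch.int8, device='cuda')
bias_mean_leaky_relu_kernel[(B * C,)](x, bias, out, sign, C=C, S=S, BLOCK=8)

The same structure explains the generated kernel's two stores: tmp13 (the activated mean) is written back through in_out_ptr0, while tmp14 (the comparison against zero) becomes the boolean buf35 returned for leaky_relu_backward.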
NetVLAD
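This record lowers a NetVLAD aggregation head. The kernel structure that follows can be read off directly: 128-dim descriptors at 4096 spatial positions (e.g. a 64x64 map) are L2-normalized with the norm clamped at 1e-12, soft-assignment logits over 64 clusters (presumably from the standard 1x1 convolution, executed outside these kernels) go through a softmax, and per-cluster residuals x - c_k are weighted by the assignment and summed over space, one materialized buffer per cluster. A hedged eager-mode sketch with those shapes (module and parameter names are assumptions; the generated code stores the centroid table differently):

import torch
import torch.nn as nn
import torch.nn.functional as F

class NetVLADSketch(nn.Module):
    """D=128 descriptors, K=64 clusters -- the shapes in the kernels below."""
    def __init__(self, dim=128, num_clusters=64):
        super().__init__()
        self.K = num_clusters
        self.conv = nn.Conv2d(dim, num_clusters, kernel_size=1, bias=True)
        self.centroids = nn.Parameter(torch.randn(num_clusters, dim))

    def forward(self, x):                      # x: (N, D, H, W)
        N, D = x.shape[:2]
        # Descriptor-wise L2 norm; F.normalize clamps the norm at eps, the
        # same maximum(sqrt(.), 1e-12) computed by the first kernel below.
        x = F.normalize(x, p=2, dim=1, eps=1e-12)
        soft = F.softmax(self.conv(x).view(N, self.K, -1), dim=1)  # (N,K,HW)
        x = x.view(N, D, -1)                   # (N, D, HW)
        vlad = x.new_zeros(N, self.K, D)
        # Looping over clusters mirrors the lowering, which materializes one
        # (x - c_k) buffer per cluster rather than an (N, K, D, HW) tensor.
        for k in range(self.K):
            residual = x - self.centroids[k].view(1, D, 1)
            vlad[:, k] = (residual * soft[:, k].unsqueeze(1)).sum(-1)
        # Standard NetVLAD tail (intra-norm, flatten, L2); the excerpt is
        # truncated before this point, so this part is an assumption.
        vlad = F.normalize(vlad, p=2, dim=2)
        return F.normalize(vlad.view(N, -1), p=2, dim=1)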
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/tb/ctbeeotfqzbneeewwh2aiay5657nsb5gfe5znphkkjrpdvh7ojsn.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.linalg_vector_norm] # Source node to ATen node mapping: # x => pow_1, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) triton_red_fused_linalg_vector_norm_0 = async_compile.triton('triton_red_fused_linalg_vector_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[16384, 128], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 16384 rnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, 
None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 4096 x1 = (xindex // 4096) _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x3 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (x0 + (4096*r2) + (524288*x1)), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = _tmp3 + tmp2 _tmp3 = tl.where(rmask, tmp4, _tmp3) tmp3 = tl.sum(_tmp3, 1)[:, None] tl.store(out_ptr0 + (x3), tmp3, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ef/cefdzljppvz2lfunb6uf63d2oi3ptkpnhsxqbeffjopee5fas75z.py # Topologically Sorted Source Nodes: [x, residual_2, residual_4, residual_6, residual_8, residual_10, residual_12, residual_14, residual_16, residual_18, residual_20, residual_22, residual_24, residual_26, residual_28, residual_30, residual_32, residual_34, residual_36, residual_38, residual_40, residual_42, residual_44, residual_46, residual_48, residual_50, residual_52, residual_54, residual_56, residual_58, residual_60, residual_62, residual_64, residual_66, residual_68, residual_70, residual_72, residual_74, residual_76, residual_78, residual_80, residual_82, residual_84, residual_86, residual_88, residual_90, residual_92, residual_94, residual_96, residual_98, residual_100, residual_102, residual_104, residual_106, residual_108, residual_110, residual_112, residual_114, residual_116, residual_118, residual_120, residual_122, residual_124, residual_126], Original ATen: [aten.div, aten.sub] # Source node to ATen node mapping: # residual_10 => sub_6 # residual_100 => sub_51 # residual_102 => sub_52 # residual_104 => sub_53 # residual_106 => sub_54 # residual_108 => sub_55 # residual_110 => sub_56 # residual_112 => sub_57 # residual_114 => sub_58 # residual_116 => sub_59 # residual_118 => sub_60 # residual_12 => sub_7 # residual_120 => sub_61 # residual_122 => sub_62 # residual_124 => sub_63 # residual_126 => sub_64 # residual_14 => sub_8 # residual_16 => sub_9 # residual_18 => sub_10 # residual_2 => sub_2 # residual_20 => sub_11 # residual_22 => sub_12 # residual_24 => sub_13 # residual_26 => sub_14 # residual_28 => sub_15 # residual_30 => sub_16 # residual_32 => sub_17 # residual_34 => sub_18 # residual_36 => sub_19 # residual_38 => sub_20 # residual_4 => sub_3 # residual_40 => sub_21 # residual_42 => sub_22 # residual_44 => sub_23 # residual_46 => sub_24 # residual_48 => sub_25 # residual_50 => sub_26 # residual_52 => sub_27 # residual_54 => sub_28 # residual_56 => sub_29 # residual_58 => sub_30 # residual_6 => sub_4 # residual_60 => sub_31 # residual_62 => sub_32 # residual_64 => sub_33 # residual_66 => sub_34 # residual_68 => sub_35 # residual_70 => sub_36 # residual_72 => sub_37 # residual_74 => sub_38 # residual_76 => sub_39 # residual_78 => sub_40 # residual_8 => sub_5 # residual_80 => sub_41 # residual_82 => sub_42 # residual_84 => sub_43 # residual_86 => sub_44 # residual_88 => sub_45 # residual_90 => sub_46 # residual_92 => sub_47 # residual_94 => sub_48 # residual_96 => sub_49 # residual_98 => sub_50 # x => div # Graph fragment: # %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {}) # %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_4), kwargs = {}) # %sub_3 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, 
%unsqueeze_7), kwargs = {}) # %sub_4 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_10), kwargs = {}) # %sub_5 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_13), kwargs = {}) # %sub_6 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_16), kwargs = {}) # %sub_7 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_19), kwargs = {}) # %sub_8 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_22), kwargs = {}) # %sub_9 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_25), kwargs = {}) # %sub_10 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_28), kwargs = {}) # %sub_11 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_31), kwargs = {}) # %sub_12 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_34), kwargs = {}) # %sub_13 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_37), kwargs = {}) # %sub_14 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_40), kwargs = {}) # %sub_15 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_43), kwargs = {}) # %sub_16 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_46), kwargs = {}) # %sub_17 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_49), kwargs = {}) # %sub_18 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_52), kwargs = {}) # %sub_19 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_55), kwargs = {}) # %sub_20 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_58), kwargs = {}) # %sub_21 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_61), kwargs = {}) # %sub_22 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_64), kwargs = {}) # %sub_23 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_67), kwargs = {}) # %sub_24 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_70), kwargs = {}) # %sub_25 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_73), kwargs = {}) # %sub_26 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_76), kwargs = {}) # %sub_27 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_79), kwargs = {}) # %sub_28 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_82), kwargs = {}) # %sub_29 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_85), kwargs = {}) # %sub_30 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_88), kwargs = {}) # %sub_31 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_91), kwargs = {}) # %sub_32 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = 
(%permute, %unsqueeze_94), kwargs = {}) # %sub_33 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_97), kwargs = {}) # %sub_34 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_100), kwargs = {}) # %sub_35 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_103), kwargs = {}) # %sub_36 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_106), kwargs = {}) # %sub_37 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_109), kwargs = {}) # %sub_38 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_112), kwargs = {}) # %sub_39 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_115), kwargs = {}) # %sub_40 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_118), kwargs = {}) # %sub_41 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_121), kwargs = {}) # %sub_42 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_124), kwargs = {}) # %sub_43 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_127), kwargs = {}) # %sub_44 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_130), kwargs = {}) # %sub_45 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_133), kwargs = {}) # %sub_46 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_136), kwargs = {}) # %sub_47 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_139), kwargs = {}) # %sub_48 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_142), kwargs = {}) # %sub_49 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_145), kwargs = {}) # %sub_50 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_148), kwargs = {}) # %sub_51 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_151), kwargs = {}) # %sub_52 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_154), kwargs = {}) # %sub_53 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_157), kwargs = {}) # %sub_54 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_160), kwargs = {}) # %sub_55 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_163), kwargs = {}) # %sub_56 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_166), kwargs = {}) # %sub_57 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_169), kwargs = {}) # %sub_58 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_172), kwargs = {}) # %sub_59 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_175), kwargs = {}) # %sub_60 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_178), kwargs = {}) # %sub_61 : [num_users=2] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_181), kwargs = {}) # %sub_62 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_184), kwargs = {}) # %sub_63 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_187), kwargs = {}) # %sub_64 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_190), kwargs = {}) triton_poi_fused_div_sub_1 = async_compile.triton('triton_poi_fused_div_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: '*fp32', 34: '*fp32', 35: '*fp32', 36: '*fp32', 37: '*fp32', 38: '*fp32', 39: '*fp32', 40: '*fp32', 41: '*fp32', 42: '*fp32', 43: '*fp32', 44: '*fp32', 45: '*fp32', 46: '*fp32', 47: '*fp32', 48: '*fp32', 49: '*fp32', 50: '*fp32', 51: '*fp32', 52: '*fp32', 53: '*fp32', 54: '*fp32', 55: '*fp32', 56: '*fp32', 57: '*fp32', 58: '*fp32', 59: '*fp32', 60: '*fp32', 61: '*fp32', 62: '*fp32', 63: '*fp32', 64: '*fp32', 65: '*fp32', 66: '*fp32', 67: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 65, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, out_ptr28, out_ptr29, out_ptr30, out_ptr31, out_ptr32, out_ptr33, out_ptr34, out_ptr35, out_ptr36, out_ptr37, out_ptr38, out_ptr39, out_ptr40, out_ptr41, 
out_ptr42, out_ptr43, out_ptr44, out_ptr45, out_ptr46, out_ptr47, out_ptr48, out_ptr49, out_ptr50, out_ptr51, out_ptr52, out_ptr53, out_ptr54, out_ptr55, out_ptr56, out_ptr57, out_ptr58, out_ptr59, out_ptr60, out_ptr61, out_ptr62, out_ptr63, xnumel, XBLOCK : tl.constexpr): xnumel = 2097152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = (xindex // 524288) x1 = (xindex // 4096) % 128 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x0 + (4096*x2)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (128 + x1), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (256 + x1), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (384 + x1), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (512 + x1), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + (640 + x1), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + (768 + x1), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr2 + (896 + x1), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (1024 + x1), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (1152 + x1), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (1280 + x1), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (1408 + x1), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + (1536 + x1), None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (1664 + x1), None, eviction_policy='evict_last') tmp32 = tl.load(in_ptr2 + (1792 + x1), None, eviction_policy='evict_last') tmp34 = tl.load(in_ptr2 + (1920 + x1), None, eviction_policy='evict_last') tmp36 = tl.load(in_ptr2 + (2048 + x1), None, eviction_policy='evict_last') tmp38 = tl.load(in_ptr2 + (2176 + x1), None, eviction_policy='evict_last') tmp40 = tl.load(in_ptr2 + (2304 + x1), None, eviction_policy='evict_last') tmp42 = tl.load(in_ptr2 + (2432 + x1), None, eviction_policy='evict_last') tmp44 = tl.load(in_ptr2 + (2560 + x1), None, eviction_policy='evict_last') tmp46 = tl.load(in_ptr2 + (2688 + x1), None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr2 + (2816 + x1), None, eviction_policy='evict_last') tmp50 = tl.load(in_ptr2 + (2944 + x1), None, eviction_policy='evict_last') tmp52 = tl.load(in_ptr2 + (3072 + x1), None, eviction_policy='evict_last') tmp54 = tl.load(in_ptr2 + (3200 + x1), None, eviction_policy='evict_last') tmp56 = tl.load(in_ptr2 + (3328 + x1), None, eviction_policy='evict_last') tmp58 = tl.load(in_ptr2 + (3456 + x1), None, eviction_policy='evict_last') tmp60 = tl.load(in_ptr2 + (3584 + x1), None, eviction_policy='evict_last') tmp62 = tl.load(in_ptr2 + (3712 + x1), None, eviction_policy='evict_last') tmp64 = tl.load(in_ptr2 + (3840 + x1), None, eviction_policy='evict_last') tmp66 = tl.load(in_ptr2 + (3968 + x1), None, eviction_policy='evict_last') tmp68 = tl.load(in_ptr2 + (4096 + x1), None, eviction_policy='evict_last') tmp70 = tl.load(in_ptr2 + (4224 + x1), None, eviction_policy='evict_last') tmp72 = tl.load(in_ptr2 + (4352 + x1), None, eviction_policy='evict_last') tmp74 = tl.load(in_ptr2 + (4480 + x1), None, eviction_policy='evict_last') tmp76 = tl.load(in_ptr2 + (4608 + x1), None, eviction_policy='evict_last') tmp78 = tl.load(in_ptr2 + (4736 + x1), None, eviction_policy='evict_last') tmp80 = tl.load(in_ptr2 + (4864 + x1), None, eviction_policy='evict_last') tmp82 = tl.load(in_ptr2 + (4992 + x1), None, eviction_policy='evict_last') tmp84 = tl.load(in_ptr2 + 
(5120 + x1), None, eviction_policy='evict_last') tmp86 = tl.load(in_ptr2 + (5248 + x1), None, eviction_policy='evict_last') tmp88 = tl.load(in_ptr2 + (5376 + x1), None, eviction_policy='evict_last') tmp90 = tl.load(in_ptr2 + (5504 + x1), None, eviction_policy='evict_last') tmp92 = tl.load(in_ptr2 + (5632 + x1), None, eviction_policy='evict_last') tmp94 = tl.load(in_ptr2 + (5760 + x1), None, eviction_policy='evict_last') tmp96 = tl.load(in_ptr2 + (5888 + x1), None, eviction_policy='evict_last') tmp98 = tl.load(in_ptr2 + (6016 + x1), None, eviction_policy='evict_last') tmp100 = tl.load(in_ptr2 + (6144 + x1), None, eviction_policy='evict_last') tmp102 = tl.load(in_ptr2 + (6272 + x1), None, eviction_policy='evict_last') tmp104 = tl.load(in_ptr2 + (6400 + x1), None, eviction_policy='evict_last') tmp106 = tl.load(in_ptr2 + (6528 + x1), None, eviction_policy='evict_last') tmp108 = tl.load(in_ptr2 + (6656 + x1), None, eviction_policy='evict_last') tmp110 = tl.load(in_ptr2 + (6784 + x1), None, eviction_policy='evict_last') tmp112 = tl.load(in_ptr2 + (6912 + x1), None, eviction_policy='evict_last') tmp114 = tl.load(in_ptr2 + (7040 + x1), None, eviction_policy='evict_last') tmp116 = tl.load(in_ptr2 + (7168 + x1), None, eviction_policy='evict_last') tmp118 = tl.load(in_ptr2 + (7296 + x1), None, eviction_policy='evict_last') tmp120 = tl.load(in_ptr2 + (7424 + x1), None, eviction_policy='evict_last') tmp122 = tl.load(in_ptr2 + (7552 + x1), None, eviction_policy='evict_last') tmp124 = tl.load(in_ptr2 + (7680 + x1), None, eviction_policy='evict_last') tmp126 = tl.load(in_ptr2 + (7808 + x1), None, eviction_policy='evict_last') tmp128 = tl.load(in_ptr2 + (7936 + x1), None, eviction_policy='evict_last') tmp130 = tl.load(in_ptr2 + (8064 + x1), None, eviction_policy='evict_last') tmp2 = libdevice.sqrt(tmp1) tmp3 = 1e-12 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp0 / tmp4 tmp7 = tmp5 - tmp6 tmp9 = tmp5 - tmp8 tmp11 = tmp5 - tmp10 tmp13 = tmp5 - tmp12 tmp15 = tmp5 - tmp14 tmp17 = tmp5 - tmp16 tmp19 = tmp5 - tmp18 tmp21 = tmp5 - tmp20 tmp23 = tmp5 - tmp22 tmp25 = tmp5 - tmp24 tmp27 = tmp5 - tmp26 tmp29 = tmp5 - tmp28 tmp31 = tmp5 - tmp30 tmp33 = tmp5 - tmp32 tmp35 = tmp5 - tmp34 tmp37 = tmp5 - tmp36 tmp39 = tmp5 - tmp38 tmp41 = tmp5 - tmp40 tmp43 = tmp5 - tmp42 tmp45 = tmp5 - tmp44 tmp47 = tmp5 - tmp46 tmp49 = tmp5 - tmp48 tmp51 = tmp5 - tmp50 tmp53 = tmp5 - tmp52 tmp55 = tmp5 - tmp54 tmp57 = tmp5 - tmp56 tmp59 = tmp5 - tmp58 tmp61 = tmp5 - tmp60 tmp63 = tmp5 - tmp62 tmp65 = tmp5 - tmp64 tmp67 = tmp5 - tmp66 tmp69 = tmp5 - tmp68 tmp71 = tmp5 - tmp70 tmp73 = tmp5 - tmp72 tmp75 = tmp5 - tmp74 tmp77 = tmp5 - tmp76 tmp79 = tmp5 - tmp78 tmp81 = tmp5 - tmp80 tmp83 = tmp5 - tmp82 tmp85 = tmp5 - tmp84 tmp87 = tmp5 - tmp86 tmp89 = tmp5 - tmp88 tmp91 = tmp5 - tmp90 tmp93 = tmp5 - tmp92 tmp95 = tmp5 - tmp94 tmp97 = tmp5 - tmp96 tmp99 = tmp5 - tmp98 tmp101 = tmp5 - tmp100 tmp103 = tmp5 - tmp102 tmp105 = tmp5 - tmp104 tmp107 = tmp5 - tmp106 tmp109 = tmp5 - tmp108 tmp111 = tmp5 - tmp110 tmp113 = tmp5 - tmp112 tmp115 = tmp5 - tmp114 tmp117 = tmp5 - tmp116 tmp119 = tmp5 - tmp118 tmp121 = tmp5 - tmp120 tmp123 = tmp5 - tmp122 tmp125 = tmp5 - tmp124 tmp127 = tmp5 - tmp126 tmp129 = tmp5 - tmp128 tmp131 = tmp5 - tmp130 tl.store(out_ptr0 + (x3), tmp5, None) tl.store(out_ptr1 + (x3), tmp7, None) tl.store(out_ptr2 + (x3), tmp9, None) tl.store(out_ptr3 + (x3), tmp11, None) tl.store(out_ptr4 + (x3), tmp13, None) tl.store(out_ptr5 + (x3), tmp15, None) tl.store(out_ptr6 + (x3), tmp17, None) tl.store(out_ptr7 + (x3), tmp19, None) 
tl.store(out_ptr8 + (x3), tmp21, None) tl.store(out_ptr9 + (x3), tmp23, None) tl.store(out_ptr10 + (x3), tmp25, None) tl.store(out_ptr11 + (x3), tmp27, None) tl.store(out_ptr12 + (x3), tmp29, None) tl.store(out_ptr13 + (x3), tmp31, None) tl.store(out_ptr14 + (x3), tmp33, None) tl.store(out_ptr15 + (x3), tmp35, None) tl.store(out_ptr16 + (x3), tmp37, None) tl.store(out_ptr17 + (x3), tmp39, None) tl.store(out_ptr18 + (x3), tmp41, None) tl.store(out_ptr19 + (x3), tmp43, None) tl.store(out_ptr20 + (x3), tmp45, None) tl.store(out_ptr21 + (x3), tmp47, None) tl.store(out_ptr22 + (x3), tmp49, None) tl.store(out_ptr23 + (x3), tmp51, None) tl.store(out_ptr24 + (x3), tmp53, None) tl.store(out_ptr25 + (x3), tmp55, None) tl.store(out_ptr26 + (x3), tmp57, None) tl.store(out_ptr27 + (x3), tmp59, None) tl.store(out_ptr28 + (x3), tmp61, None) tl.store(out_ptr29 + (x3), tmp63, None) tl.store(out_ptr30 + (x3), tmp65, None) tl.store(out_ptr31 + (x3), tmp67, None) tl.store(out_ptr32 + (x3), tmp69, None) tl.store(out_ptr33 + (x3), tmp71, None) tl.store(out_ptr34 + (x3), tmp73, None) tl.store(out_ptr35 + (x3), tmp75, None) tl.store(out_ptr36 + (x3), tmp77, None) tl.store(out_ptr37 + (x3), tmp79, None) tl.store(out_ptr38 + (x3), tmp81, None) tl.store(out_ptr39 + (x3), tmp83, None) tl.store(out_ptr40 + (x3), tmp85, None) tl.store(out_ptr41 + (x3), tmp87, None) tl.store(out_ptr42 + (x3), tmp89, None) tl.store(out_ptr43 + (x3), tmp91, None) tl.store(out_ptr44 + (x3), tmp93, None) tl.store(out_ptr45 + (x3), tmp95, None) tl.store(out_ptr46 + (x3), tmp97, None) tl.store(out_ptr47 + (x3), tmp99, None) tl.store(out_ptr48 + (x3), tmp101, None) tl.store(out_ptr49 + (x3), tmp103, None) tl.store(out_ptr50 + (x3), tmp105, None) tl.store(out_ptr51 + (x3), tmp107, None) tl.store(out_ptr52 + (x3), tmp109, None) tl.store(out_ptr53 + (x3), tmp111, None) tl.store(out_ptr54 + (x3), tmp113, None) tl.store(out_ptr55 + (x3), tmp115, None) tl.store(out_ptr56 + (x3), tmp117, None) tl.store(out_ptr57 + (x3), tmp119, None) tl.store(out_ptr58 + (x3), tmp121, None) tl.store(out_ptr59 + (x3), tmp123, None) tl.store(out_ptr60 + (x3), tmp125, None) tl.store(out_ptr61 + (x3), tmp127, None) tl.store(out_ptr62 + (x3), tmp129, None) tl.store(out_ptr63 + (x3), tmp131, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/u6/cu6dgbkwo4zyodk2zqiay4hwrwemkqpxzmixog3qipqaqcevgo7u.py # Topologically Sorted Source Nodes: [soft_assign_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # soft_assign_1 => amax, exp, sub, sum_2 # Graph fragment: # %amax : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) triton_per_fused__softmax_2 = async_compile.triton('triton_per_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16384, 64], 
reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_2(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16384 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4096 x1 = (xindex // 4096) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4096*r2) + (262144*x1)), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.max2(tmp1, 1)[:, None] tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tl.store(out_ptr0 + (x3), tmp3, None) tl.store(out_ptr1 + (x3), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/46/c465fdmmhrzvuvb7xjrad46zallycaofrdeajo4ox533uv52dzji.py # Topologically Sorted Source Nodes: [residual, residual_1, sum_1, residual_3, sum_2, residual_5, sum_3, residual_7, sum_4, residual_9, sum_5, residual_11, sum_6, residual_13, sum_7, residual_15, sum_8, residual_17, sum_9, residual_19, sum_10, residual_21, sum_11, residual_23, sum_12, residual_25, sum_13, residual_27, sum_14, residual_29, sum_15, residual_31, sum_16, residual_33, sum_17, residual_35, sum_18, residual_37, sum_19, residual_39, sum_20, residual_41, sum_21, residual_43, sum_22, residual_45, sum_23, residual_47, sum_24, residual_49, sum_25, residual_51, sum_26, residual_53, sum_27, residual_55, sum_28, residual_57, sum_29], Original ATen: [aten.sub, aten.mul, aten.sum] # Source node to ATen node mapping: # residual => sub_1 # residual_1 => mul # residual_11 => mul_5 # residual_13 => mul_6 # residual_15 => mul_7 # residual_17 => mul_8 # residual_19 => mul_9 # residual_21 => mul_10 # residual_23 => mul_11 # residual_25 => mul_12 # residual_27 => mul_13 # residual_29 => mul_14 # residual_3 => mul_1 # residual_31 => mul_15 # residual_33 => mul_16 # residual_35 => mul_17 # residual_37 => mul_18 # residual_39 => mul_19 # residual_41 => mul_20 # residual_43 => mul_21 # residual_45 => mul_22 # residual_47 => mul_23 # residual_49 => mul_24 # residual_5 => mul_2 # residual_51 => mul_25 # residual_53 => mul_26 # residual_55 => mul_27 # residual_57 => mul_28 # residual_7 => mul_3 # residual_9 => mul_4 # sum_1 => sum_3 # sum_10 => sum_12 # sum_11 => sum_13 # sum_12 => sum_14 # sum_13 => sum_15 # sum_14 => sum_16 # sum_15 => sum_17 # sum_16 => sum_18 # sum_17 => sum_19 
# sum_18 => sum_20 # sum_19 => sum_21 # sum_2 => sum_4 # sum_20 => sum_22 # sum_21 => sum_23 # sum_22 => sum_24 # sum_23 => sum_25 # sum_24 => sum_26 # sum_25 => sum_27 # sum_26 => sum_28 # sum_27 => sum_29 # sum_28 => sum_30 # sum_29 => sum_31 # sum_3 => sum_5 # sum_4 => sum_6 # sum_5 => sum_7 # sum_6 => sum_8 # sum_7 => sum_9 # sum_8 => sum_10 # sum_9 => sum_11 # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %unsqueeze_2), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %unsqueeze_5), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [-1]), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %unsqueeze_8), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [-1]), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %unsqueeze_11), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [-1]), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, %unsqueeze_14), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [-1]), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %unsqueeze_17), kwargs = {}) # %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_5, [-1]), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, %unsqueeze_20), kwargs = {}) # %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_6, [-1]), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_8, %unsqueeze_23), kwargs = {}) # %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_7, [-1]), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, %unsqueeze_26), kwargs = {}) # %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_8, [-1]), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %unsqueeze_29), kwargs = {}) # %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_9, [-1]), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %unsqueeze_32), kwargs = {}) # %sum_13 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_10, [-1]), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_12, %unsqueeze_35), kwargs = {}) # %sum_14 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_11, [-1]), kwargs = {}) # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_13, %unsqueeze_38), kwargs = {}) # %sum_15 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = 
(%mul_12, [-1]), kwargs = {}) # %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %unsqueeze_41), kwargs = {}) # %sum_16 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_13, [-1]), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_15, %unsqueeze_44), kwargs = {}) # %sum_17 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_14, [-1]), kwargs = {}) # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_16, %unsqueeze_47), kwargs = {}) # %sum_18 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_15, [-1]), kwargs = {}) # %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, %unsqueeze_50), kwargs = {}) # %sum_19 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_16, [-1]), kwargs = {}) # %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_18, %unsqueeze_53), kwargs = {}) # %sum_20 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_17, [-1]), kwargs = {}) # %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_19, %unsqueeze_56), kwargs = {}) # %sum_21 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_18, [-1]), kwargs = {}) # %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_20, %unsqueeze_59), kwargs = {}) # %sum_22 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_19, [-1]), kwargs = {}) # %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_21, %unsqueeze_62), kwargs = {}) # %sum_23 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_20, [-1]), kwargs = {}) # %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_22, %unsqueeze_65), kwargs = {}) # %sum_24 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_21, [-1]), kwargs = {}) # %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_23, %unsqueeze_68), kwargs = {}) # %sum_25 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_22, [-1]), kwargs = {}) # %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_24, %unsqueeze_71), kwargs = {}) # %sum_26 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_23, [-1]), kwargs = {}) # %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_25, %unsqueeze_74), kwargs = {}) # %sum_27 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_24, [-1]), kwargs = {}) # %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_26, %unsqueeze_77), kwargs = {}) # %sum_28 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_25, [-1]), kwargs = {}) # %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_27, %unsqueeze_80), kwargs = {}) # %sum_29 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_26, [-1]), kwargs = {}) # %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_28, %unsqueeze_83), kwargs = {}) # %sum_30 : [num_users=1] = 
call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_27, [-1]), kwargs = {}) # %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_29, %unsqueeze_86), kwargs = {}) # %sum_31 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_28, [-1]), kwargs = {}) triton_red_fused_mul_sub_sum_3 = async_compile.triton('triton_red_fused_mul_sub_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[512, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: '*fp32', 34: '*fp32', 35: '*fp32', 36: '*fp32', 37: '*fp32', 38: '*fp32', 39: '*fp32', 40: '*fp32', 41: '*fp32', 42: '*fp32', 43: '*fp32', 44: '*fp32', 45: '*fp32', 46: '*fp32', 47: '*fp32', 48: '*fp32', 49: '*fp32', 50: '*fp32', 51: '*fp32', 52: '*fp32', 53: '*fp32', 54: '*fp32', 55: '*fp32', 56: '*fp32', 57: '*fp32', 58: '*fp32', 59: '*fp32', 60: '*fp32', 61: '*fp32', 62: 'i32', 63: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mul_sub_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 61, 'num_reduction': 29, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, in_ptr32, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, out_ptr28, xnumel, rnumel, XBLOCK : 
tl.constexpr, RBLOCK : tl.constexpr): xnumel = 512 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x0 = xindex % 128 tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') x1 = (xindex // 128) _tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp20 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp29 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp38 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp47 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp56 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp65 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp74 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp83 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp92 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp101 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp110 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp119 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp128 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp137 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp146 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp155 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp164 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp173 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp182 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp191 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp200 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp209 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp218 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp227 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp236 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp245 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp254 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp263 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp3 = tl.load(in_ptr2 + (r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr3 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tl.load(in_ptr4 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl.load(in_ptr5 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp14 = tl.load(in_ptr2 + (4096 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tl.load(in_ptr6 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp23 = tl.load(in_ptr2 + (8192 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp31 = tl.load(in_ptr7 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp32 = tl.load(in_ptr2 + (12288 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp40 = tl.load(in_ptr8 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp41 = tl.load(in_ptr2 + (16384 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp49 = tl.load(in_ptr9 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp50 = tl.load(in_ptr2 + (20480 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp58 = tl.load(in_ptr10 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp59 = tl.load(in_ptr2 + (24576 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', 
other=0.0) tmp67 = tl.load(in_ptr11 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp68 = tl.load(in_ptr2 + (28672 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp76 = tl.load(in_ptr12 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp77 = tl.load(in_ptr2 + (32768 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp85 = tl.load(in_ptr13 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp86 = tl.load(in_ptr2 + (36864 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp94 = tl.load(in_ptr14 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp95 = tl.load(in_ptr2 + (40960 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp103 = tl.load(in_ptr15 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp104 = tl.load(in_ptr2 + (45056 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp112 = tl.load(in_ptr16 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp113 = tl.load(in_ptr2 + (49152 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp121 = tl.load(in_ptr17 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp122 = tl.load(in_ptr2 + (53248 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp130 = tl.load(in_ptr18 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp131 = tl.load(in_ptr2 + (57344 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp139 = tl.load(in_ptr19 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp140 = tl.load(in_ptr2 + (61440 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp148 = tl.load(in_ptr20 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp149 = tl.load(in_ptr2 + (65536 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp157 = tl.load(in_ptr21 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp158 = tl.load(in_ptr2 + (69632 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp166 = tl.load(in_ptr22 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp167 = tl.load(in_ptr2 + (73728 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp175 = tl.load(in_ptr23 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp176 = tl.load(in_ptr2 + (77824 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp184 = tl.load(in_ptr24 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp185 = tl.load(in_ptr2 + (81920 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp193 = tl.load(in_ptr25 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp194 = tl.load(in_ptr2 + (86016 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp202 = tl.load(in_ptr26 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp203 = tl.load(in_ptr2 + (90112 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp211 = tl.load(in_ptr27 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp212 = tl.load(in_ptr2 
+ (94208 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp220 = tl.load(in_ptr28 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp221 = tl.load(in_ptr2 + (98304 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp229 = tl.load(in_ptr29 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp230 = tl.load(in_ptr2 + (102400 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp238 = tl.load(in_ptr30 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp239 = tl.load(in_ptr2 + (106496 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp247 = tl.load(in_ptr31 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp248 = tl.load(in_ptr2 + (110592 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp256 = tl.load(in_ptr32 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp257 = tl.load(in_ptr2 + (114688 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp2 * tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = _tmp11 + tmp10 _tmp11 = tl.where(rmask & xmask, tmp12, _tmp11) tmp15 = tmp14 - tmp4 tmp16 = tl_math.exp(tmp15) tmp17 = tmp16 / tmp7 tmp18 = tmp13 * tmp17 tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp21 = _tmp20 + tmp19 _tmp20 = tl.where(rmask & xmask, tmp21, _tmp20) tmp24 = tmp23 - tmp4 tmp25 = tl_math.exp(tmp24) tmp26 = tmp25 / tmp7 tmp27 = tmp22 * tmp26 tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = _tmp29 + tmp28 _tmp29 = tl.where(rmask & xmask, tmp30, _tmp29) tmp33 = tmp32 - tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp34 / tmp7 tmp36 = tmp31 * tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = _tmp38 + tmp37 _tmp38 = tl.where(rmask & xmask, tmp39, _tmp38) tmp42 = tmp41 - tmp4 tmp43 = tl_math.exp(tmp42) tmp44 = tmp43 / tmp7 tmp45 = tmp40 * tmp44 tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK]) tmp48 = _tmp47 + tmp46 _tmp47 = tl.where(rmask & xmask, tmp48, _tmp47) tmp51 = tmp50 - tmp4 tmp52 = tl_math.exp(tmp51) tmp53 = tmp52 / tmp7 tmp54 = tmp49 * tmp53 tmp55 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK]) tmp57 = _tmp56 + tmp55 _tmp56 = tl.where(rmask & xmask, tmp57, _tmp56) tmp60 = tmp59 - tmp4 tmp61 = tl_math.exp(tmp60) tmp62 = tmp61 / tmp7 tmp63 = tmp58 * tmp62 tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK]) tmp66 = _tmp65 + tmp64 _tmp65 = tl.where(rmask & xmask, tmp66, _tmp65) tmp69 = tmp68 - tmp4 tmp70 = tl_math.exp(tmp69) tmp71 = tmp70 / tmp7 tmp72 = tmp67 * tmp71 tmp73 = tl.broadcast_to(tmp72, [XBLOCK, RBLOCK]) tmp75 = _tmp74 + tmp73 _tmp74 = tl.where(rmask & xmask, tmp75, _tmp74) tmp78 = tmp77 - tmp4 tmp79 = tl_math.exp(tmp78) tmp80 = tmp79 / tmp7 tmp81 = tmp76 * tmp80 tmp82 = tl.broadcast_to(tmp81, [XBLOCK, RBLOCK]) tmp84 = _tmp83 + tmp82 _tmp83 = tl.where(rmask & xmask, tmp84, _tmp83) tmp87 = tmp86 - tmp4 tmp88 = tl_math.exp(tmp87) tmp89 = tmp88 / tmp7 tmp90 = tmp85 * tmp89 tmp91 = tl.broadcast_to(tmp90, [XBLOCK, RBLOCK]) tmp93 = _tmp92 + tmp91 _tmp92 = tl.where(rmask & xmask, tmp93, _tmp92) tmp96 = tmp95 - tmp4 tmp97 = tl_math.exp(tmp96) tmp98 = tmp97 / tmp7 tmp99 = tmp94 * tmp98 tmp100 = tl.broadcast_to(tmp99, [XBLOCK, RBLOCK]) tmp102 = _tmp101 + tmp100 _tmp101 = tl.where(rmask & xmask, tmp102, _tmp101) tmp105 = tmp104 - tmp4 tmp106 = tl_math.exp(tmp105) 
tmp107 = tmp106 / tmp7 tmp108 = tmp103 * tmp107 tmp109 = tl.broadcast_to(tmp108, [XBLOCK, RBLOCK]) tmp111 = _tmp110 + tmp109 _tmp110 = tl.where(rmask & xmask, tmp111, _tmp110) tmp114 = tmp113 - tmp4 tmp115 = tl_math.exp(tmp114) tmp116 = tmp115 / tmp7 tmp117 = tmp112 * tmp116 tmp118 = tl.broadcast_to(tmp117, [XBLOCK, RBLOCK]) tmp120 = _tmp119 + tmp118 _tmp119 = tl.where(rmask & xmask, tmp120, _tmp119) tmp123 = tmp122 - tmp4 tmp124 = tl_math.exp(tmp123) tmp125 = tmp124 / tmp7 tmp126 = tmp121 * tmp125 tmp127 = tl.broadcast_to(tmp126, [XBLOCK, RBLOCK]) tmp129 = _tmp128 + tmp127 _tmp128 = tl.where(rmask & xmask, tmp129, _tmp128) tmp132 = tmp131 - tmp4 tmp133 = tl_math.exp(tmp132) tmp134 = tmp133 / tmp7 tmp135 = tmp130 * tmp134 tmp136 = tl.broadcast_to(tmp135, [XBLOCK, RBLOCK]) tmp138 = _tmp137 + tmp136 _tmp137 = tl.where(rmask & xmask, tmp138, _tmp137) tmp141 = tmp140 - tmp4 tmp142 = tl_math.exp(tmp141) tmp143 = tmp142 / tmp7 tmp144 = tmp139 * tmp143 tmp145 = tl.broadcast_to(tmp144, [XBLOCK, RBLOCK]) tmp147 = _tmp146 + tmp145 _tmp146 = tl.where(rmask & xmask, tmp147, _tmp146) tmp150 = tmp149 - tmp4 tmp151 = tl_math.exp(tmp150) tmp152 = tmp151 / tmp7 tmp153 = tmp148 * tmp152 tmp154 = tl.broadcast_to(tmp153, [XBLOCK, RBLOCK]) tmp156 = _tmp155 + tmp154 _tmp155 = tl.where(rmask & xmask, tmp156, _tmp155) tmp159 = tmp158 - tmp4 tmp160 = tl_math.exp(tmp159) tmp161 = tmp160 / tmp7 tmp162 = tmp157 * tmp161 tmp163 = tl.broadcast_to(tmp162, [XBLOCK, RBLOCK]) tmp165 = _tmp164 + tmp163 _tmp164 = tl.where(rmask & xmask, tmp165, _tmp164) tmp168 = tmp167 - tmp4 tmp169 = tl_math.exp(tmp168) tmp170 = tmp169 / tmp7 tmp171 = tmp166 * tmp170 tmp172 = tl.broadcast_to(tmp171, [XBLOCK, RBLOCK]) tmp174 = _tmp173 + tmp172 _tmp173 = tl.where(rmask & xmask, tmp174, _tmp173) tmp177 = tmp176 - tmp4 tmp178 = tl_math.exp(tmp177) tmp179 = tmp178 / tmp7 tmp180 = tmp175 * tmp179 tmp181 = tl.broadcast_to(tmp180, [XBLOCK, RBLOCK]) tmp183 = _tmp182 + tmp181 _tmp182 = tl.where(rmask & xmask, tmp183, _tmp182) tmp186 = tmp185 - tmp4 tmp187 = tl_math.exp(tmp186) tmp188 = tmp187 / tmp7 tmp189 = tmp184 * tmp188 tmp190 = tl.broadcast_to(tmp189, [XBLOCK, RBLOCK]) tmp192 = _tmp191 + tmp190 _tmp191 = tl.where(rmask & xmask, tmp192, _tmp191) tmp195 = tmp194 - tmp4 tmp196 = tl_math.exp(tmp195) tmp197 = tmp196 / tmp7 tmp198 = tmp193 * tmp197 tmp199 = tl.broadcast_to(tmp198, [XBLOCK, RBLOCK]) tmp201 = _tmp200 + tmp199 _tmp200 = tl.where(rmask & xmask, tmp201, _tmp200) tmp204 = tmp203 - tmp4 tmp205 = tl_math.exp(tmp204) tmp206 = tmp205 / tmp7 tmp207 = tmp202 * tmp206 tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK]) tmp210 = _tmp209 + tmp208 _tmp209 = tl.where(rmask & xmask, tmp210, _tmp209) tmp213 = tmp212 - tmp4 tmp214 = tl_math.exp(tmp213) tmp215 = tmp214 / tmp7 tmp216 = tmp211 * tmp215 tmp217 = tl.broadcast_to(tmp216, [XBLOCK, RBLOCK]) tmp219 = _tmp218 + tmp217 _tmp218 = tl.where(rmask & xmask, tmp219, _tmp218) tmp222 = tmp221 - tmp4 tmp223 = tl_math.exp(tmp222) tmp224 = tmp223 / tmp7 tmp225 = tmp220 * tmp224 tmp226 = tl.broadcast_to(tmp225, [XBLOCK, RBLOCK]) tmp228 = _tmp227 + tmp226 _tmp227 = tl.where(rmask & xmask, tmp228, _tmp227) tmp231 = tmp230 - tmp4 tmp232 = tl_math.exp(tmp231) tmp233 = tmp232 / tmp7 tmp234 = tmp229 * tmp233 tmp235 = tl.broadcast_to(tmp234, [XBLOCK, RBLOCK]) tmp237 = _tmp236 + tmp235 _tmp236 = tl.where(rmask & xmask, tmp237, _tmp236) tmp240 = tmp239 - tmp4 tmp241 = tl_math.exp(tmp240) tmp242 = tmp241 / tmp7 tmp243 = tmp238 * tmp242 tmp244 = tl.broadcast_to(tmp243, [XBLOCK, RBLOCK]) tmp246 = _tmp245 + tmp244 _tmp245 = 
tl.where(rmask & xmask, tmp246, _tmp245) tmp249 = tmp248 - tmp4 tmp250 = tl_math.exp(tmp249) tmp251 = tmp250 / tmp7 tmp252 = tmp247 * tmp251 tmp253 = tl.broadcast_to(tmp252, [XBLOCK, RBLOCK]) tmp255 = _tmp254 + tmp253 _tmp254 = tl.where(rmask & xmask, tmp255, _tmp254) tmp258 = tmp257 - tmp4 tmp259 = tl_math.exp(tmp258) tmp260 = tmp259 / tmp7 tmp261 = tmp256 * tmp260 tmp262 = tl.broadcast_to(tmp261, [XBLOCK, RBLOCK]) tmp264 = _tmp263 + tmp262 _tmp263 = tl.where(rmask & xmask, tmp264, _tmp263) tmp11 = tl.sum(_tmp11, 1)[:, None] tl.store(out_ptr0 + (x3), tmp11, xmask) tmp20 = tl.sum(_tmp20, 1)[:, None] tl.store(out_ptr1 + (x3), tmp20, xmask) tmp29 = tl.sum(_tmp29, 1)[:, None] tl.store(out_ptr2 + (x3), tmp29, xmask) tmp38 = tl.sum(_tmp38, 1)[:, None] tl.store(out_ptr3 + (x3), tmp38, xmask) tmp47 = tl.sum(_tmp47, 1)[:, None] tl.store(out_ptr4 + (x3), tmp47, xmask) tmp56 = tl.sum(_tmp56, 1)[:, None] tl.store(out_ptr5 + (x3), tmp56, xmask) tmp65 = tl.sum(_tmp65, 1)[:, None] tl.store(out_ptr6 + (x3), tmp65, xmask) tmp74 = tl.sum(_tmp74, 1)[:, None] tl.store(out_ptr7 + (x3), tmp74, xmask) tmp83 = tl.sum(_tmp83, 1)[:, None] tl.store(out_ptr8 + (x3), tmp83, xmask) tmp92 = tl.sum(_tmp92, 1)[:, None] tl.store(out_ptr9 + (x3), tmp92, xmask) tmp101 = tl.sum(_tmp101, 1)[:, None] tl.store(out_ptr10 + (x3), tmp101, xmask) tmp110 = tl.sum(_tmp110, 1)[:, None] tl.store(out_ptr11 + (x3), tmp110, xmask) tmp119 = tl.sum(_tmp119, 1)[:, None] tl.store(out_ptr12 + (x3), tmp119, xmask) tmp128 = tl.sum(_tmp128, 1)[:, None] tl.store(out_ptr13 + (x3), tmp128, xmask) tmp137 = tl.sum(_tmp137, 1)[:, None] tl.store(out_ptr14 + (x3), tmp137, xmask) tmp146 = tl.sum(_tmp146, 1)[:, None] tl.store(out_ptr15 + (x3), tmp146, xmask) tmp155 = tl.sum(_tmp155, 1)[:, None] tl.store(out_ptr16 + (x3), tmp155, xmask) tmp164 = tl.sum(_tmp164, 1)[:, None] tl.store(out_ptr17 + (x3), tmp164, xmask) tmp173 = tl.sum(_tmp173, 1)[:, None] tl.store(out_ptr18 + (x3), tmp173, xmask) tmp182 = tl.sum(_tmp182, 1)[:, None] tl.store(out_ptr19 + (x3), tmp182, xmask) tmp191 = tl.sum(_tmp191, 1)[:, None] tl.store(out_ptr20 + (x3), tmp191, xmask) tmp200 = tl.sum(_tmp200, 1)[:, None] tl.store(out_ptr21 + (x3), tmp200, xmask) tmp209 = tl.sum(_tmp209, 1)[:, None] tl.store(out_ptr22 + (x3), tmp209, xmask) tmp218 = tl.sum(_tmp218, 1)[:, None] tl.store(out_ptr23 + (x3), tmp218, xmask) tmp227 = tl.sum(_tmp227, 1)[:, None] tl.store(out_ptr24 + (x3), tmp227, xmask) tmp236 = tl.sum(_tmp236, 1)[:, None] tl.store(out_ptr25 + (x3), tmp236, xmask) tmp245 = tl.sum(_tmp245, 1)[:, None] tl.store(out_ptr26 + (x3), tmp245, xmask) tmp254 = tl.sum(_tmp254, 1)[:, None] tl.store(out_ptr27 + (x3), tmp254, xmask) tmp263 = tl.sum(_tmp263, 1)[:, None] tl.store(out_ptr28 + (x3), tmp263, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7g/c7gpcb637ns46u6bq6sgchjaxn4thmkzjpeoxhjhn2ws6dc2fyq4.py # Topologically Sorted Source Nodes: [residual_59, sum_30, residual_61, sum_31, residual_63, sum_32, residual_65, sum_33, residual_67, sum_34, residual_69, sum_35, residual_71, sum_36, residual_73, sum_37, residual_75, sum_38, residual_77, sum_39, residual_79, sum_40, residual_81, sum_41, residual_83, sum_42, residual_85, sum_43, residual_87, sum_44, residual_89, sum_45, residual_91, sum_46, residual_93, sum_47, residual_95, sum_48, residual_97, sum_49, residual_99, sum_50, residual_101, sum_51, residual_103, sum_52, residual_105, sum_53, residual_107, sum_54, residual_109, sum_55, residual_111, sum_56, residual_113, sum_57], Original ATen: [aten.mul, aten.sum] # 
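# NOTE (reference sketch): each fused [aten.mul, aten.sum] reduction in this file
# computes a NetVLAD-style aggregate for one cluster k: a numerically stable
# softmax weight over the 64 cluster logits, applied to that cluster's residuals
# and summed over the 4096 spatial positions. A minimal PyTorch equivalent is
# sketched below; the helper name and the (B, D, N)/(B, K, N) shapes are
# assumptions read off the kernels' indexing (B=4, D=128, K=64, N=4096), not
# part of the generated code. It relies on the module-level `import torch`.
def _reference_weighted_residual_sum(residual_k, assign_logits, k):
    # residual_k: (B, D, N) residuals (x - c_k); assign_logits: (B, K, N) raw logits.
    max_logit = assign_logits.max(dim=1, keepdim=True).values  # per-position max (loaded as tmp4/tmp2 in these kernels)
    exp_all = torch.exp(assign_logits - max_logit)             # stable exp, like tl_math.exp(logit - max)
    soft_assign_k = exp_all[:, k] / exp_all.sum(dim=1)         # divide by the precomputed exp-sum (tmp7/tmp5)
    return (residual_k * soft_assign_k.unsqueeze(1)).sum(-1)   # (B, D): the value stored to out_ptrN
#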
Source node to ATen node mapping: # residual_101 => mul_50 # residual_103 => mul_51 # residual_105 => mul_52 # residual_107 => mul_53 # residual_109 => mul_54 # residual_111 => mul_55 # residual_113 => mul_56 # residual_59 => mul_29 # residual_61 => mul_30 # residual_63 => mul_31 # residual_65 => mul_32 # residual_67 => mul_33 # residual_69 => mul_34 # residual_71 => mul_35 # residual_73 => mul_36 # residual_75 => mul_37 # residual_77 => mul_38 # residual_79 => mul_39 # residual_81 => mul_40 # residual_83 => mul_41 # residual_85 => mul_42 # residual_87 => mul_43 # residual_89 => mul_44 # residual_91 => mul_45 # residual_93 => mul_46 # residual_95 => mul_47 # residual_97 => mul_48 # residual_99 => mul_49 # sum_30 => sum_32 # sum_31 => sum_33 # sum_32 => sum_34 # sum_33 => sum_35 # sum_34 => sum_36 # sum_35 => sum_37 # sum_36 => sum_38 # sum_37 => sum_39 # sum_38 => sum_40 # sum_39 => sum_41 # sum_40 => sum_42 # sum_41 => sum_43 # sum_42 => sum_44 # sum_43 => sum_45 # sum_44 => sum_46 # sum_45 => sum_47 # sum_46 => sum_48 # sum_47 => sum_49 # sum_48 => sum_50 # sum_49 => sum_51 # sum_50 => sum_52 # sum_51 => sum_53 # sum_52 => sum_54 # sum_53 => sum_55 # sum_54 => sum_56 # sum_55 => sum_57 # sum_56 => sum_58 # sum_57 => sum_59 # Graph fragment: # %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_30, %unsqueeze_89), kwargs = {}) # %sum_32 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_29, [-1]), kwargs = {}) # %mul_30 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_31, %unsqueeze_92), kwargs = {}) # %sum_33 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_30, [-1]), kwargs = {}) # %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_32, %unsqueeze_95), kwargs = {}) # %sum_34 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_31, [-1]), kwargs = {}) # %mul_32 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_33, %unsqueeze_98), kwargs = {}) # %sum_35 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_32, [-1]), kwargs = {}) # %mul_33 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_34, %unsqueeze_101), kwargs = {}) # %sum_36 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_33, [-1]), kwargs = {}) # %mul_34 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_35, %unsqueeze_104), kwargs = {}) # %sum_37 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_34, [-1]), kwargs = {}) # %mul_35 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_36, %unsqueeze_107), kwargs = {}) # %sum_38 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_35, [-1]), kwargs = {}) # %mul_36 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_37, %unsqueeze_110), kwargs = {}) # %sum_39 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_36, [-1]), kwargs = {}) # %mul_37 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_38, %unsqueeze_113), kwargs = {}) # %sum_40 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_37, [-1]), kwargs = {}) # %mul_38 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_39, %unsqueeze_116), kwargs = {}) # 
%sum_41 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_38, [-1]), kwargs = {}) # %mul_39 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_40, %unsqueeze_119), kwargs = {}) # %sum_42 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_39, [-1]), kwargs = {}) # %mul_40 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_41, %unsqueeze_122), kwargs = {}) # %sum_43 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_40, [-1]), kwargs = {}) # %mul_41 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_42, %unsqueeze_125), kwargs = {}) # %sum_44 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_41, [-1]), kwargs = {}) # %mul_42 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_43, %unsqueeze_128), kwargs = {}) # %sum_45 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_42, [-1]), kwargs = {}) # %mul_43 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_44, %unsqueeze_131), kwargs = {}) # %sum_46 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_43, [-1]), kwargs = {}) # %mul_44 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_45, %unsqueeze_134), kwargs = {}) # %sum_47 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_44, [-1]), kwargs = {}) # %mul_45 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_46, %unsqueeze_137), kwargs = {}) # %sum_48 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_45, [-1]), kwargs = {}) # %mul_46 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_47, %unsqueeze_140), kwargs = {}) # %sum_49 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_46, [-1]), kwargs = {}) # %mul_47 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_48, %unsqueeze_143), kwargs = {}) # %sum_50 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_47, [-1]), kwargs = {}) # %mul_48 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_49, %unsqueeze_146), kwargs = {}) # %sum_51 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_48, [-1]), kwargs = {}) # %mul_49 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_50, %unsqueeze_149), kwargs = {}) # %sum_52 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_49, [-1]), kwargs = {}) # %mul_50 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_51, %unsqueeze_152), kwargs = {}) # %sum_53 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_50, [-1]), kwargs = {}) # %mul_51 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_52, %unsqueeze_155), kwargs = {}) # %sum_54 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_51, [-1]), kwargs = {}) # %mul_52 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_53, %unsqueeze_158), kwargs = {}) # %sum_55 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_52, [-1]), kwargs = {}) # %mul_53 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_54, %unsqueeze_161), kwargs = {}) # %sum_56 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_53, [-1]), kwargs = {}) # %mul_54 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_55, %unsqueeze_164), kwargs = {}) # %sum_57 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_54, [-1]), kwargs = {}) # %mul_55 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_56, %unsqueeze_167), kwargs = {}) # %sum_58 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_55, [-1]), kwargs = {}) # %mul_56 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_57, %unsqueeze_170), kwargs = {}) # %sum_59 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_56, [-1]), kwargs = {}) triton_red_fused_mul_sum_4 = async_compile.triton('triton_red_fused_mul_sum_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[512, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: '*fp32', 34: '*fp32', 35: '*fp32', 36: '*fp32', 37: '*fp32', 38: '*fp32', 39: '*fp32', 40: '*fp32', 41: '*fp32', 42: '*fp32', 43: '*fp32', 44: '*fp32', 45: '*fp32', 46: '*fp32', 47: '*fp32', 48: '*fp32', 49: '*fp32', 50: '*fp32', 51: '*fp32', 52: '*fp32', 53: '*fp32', 54: '*fp32', 55: '*fp32', 56: '*fp32', 57: '*fp32', 58: '*fp32', 59: 'i32', 60: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mul_sum_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 58, 'num_reduction': 28, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_mul_sum_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, 
in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 512 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x1 = (xindex // 128) _tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp72 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp81 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp90 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp99 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp108 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp117 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp126 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp135 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp144 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp153 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp162 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp171 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp180 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp189 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp198 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp207 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp216 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp225 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp234 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp243 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp252 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr1 + (118784 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr2 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tl.load(in_ptr3 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr4 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp12 = tl.load(in_ptr1 + (122880 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr5 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp21 = tl.load(in_ptr1 + (126976 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tl.load(in_ptr6 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp30 = tl.load(in_ptr1 + (131072 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp38 = tl.load(in_ptr7 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp39 = tl.load(in_ptr1 + (135168 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tl.load(in_ptr8 + (r2 + 
(4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp48 = tl.load(in_ptr1 + (139264 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp56 = tl.load(in_ptr9 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp57 = tl.load(in_ptr1 + (143360 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp65 = tl.load(in_ptr10 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp66 = tl.load(in_ptr1 + (147456 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp74 = tl.load(in_ptr11 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp75 = tl.load(in_ptr1 + (151552 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp83 = tl.load(in_ptr12 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp84 = tl.load(in_ptr1 + (155648 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp92 = tl.load(in_ptr13 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp93 = tl.load(in_ptr1 + (159744 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp101 = tl.load(in_ptr14 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp102 = tl.load(in_ptr1 + (163840 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp110 = tl.load(in_ptr15 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp111 = tl.load(in_ptr1 + (167936 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp119 = tl.load(in_ptr16 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp120 = tl.load(in_ptr1 + (172032 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp128 = tl.load(in_ptr17 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp129 = tl.load(in_ptr1 + (176128 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp137 = tl.load(in_ptr18 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp138 = tl.load(in_ptr1 + (180224 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp146 = tl.load(in_ptr19 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp147 = tl.load(in_ptr1 + (184320 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp155 = tl.load(in_ptr20 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp156 = tl.load(in_ptr1 + (188416 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp164 = tl.load(in_ptr21 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp165 = tl.load(in_ptr1 + (192512 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp173 = tl.load(in_ptr22 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp174 = tl.load(in_ptr1 + (196608 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp182 = tl.load(in_ptr23 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp183 = tl.load(in_ptr1 + (200704 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp191 = tl.load(in_ptr24 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp192 = tl.load(in_ptr1 + (204800 + r2 + (262144*x1)), 
rmask & xmask, eviction_policy='evict_last', other=0.0) tmp200 = tl.load(in_ptr25 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp201 = tl.load(in_ptr1 + (208896 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp209 = tl.load(in_ptr26 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp210 = tl.load(in_ptr1 + (212992 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp218 = tl.load(in_ptr27 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp219 = tl.load(in_ptr1 + (217088 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp227 = tl.load(in_ptr28 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp228 = tl.load(in_ptr1 + (221184 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp236 = tl.load(in_ptr29 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp237 = tl.load(in_ptr1 + (225280 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp245 = tl.load(in_ptr30 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp246 = tl.load(in_ptr1 + (229376 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp3 = tmp1 - tmp2 tmp4 = tl_math.exp(tmp3) tmp6 = tmp4 / tmp5 tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = _tmp9 + tmp8 _tmp9 = tl.where(rmask & xmask, tmp10, _tmp9) tmp13 = tmp12 - tmp2 tmp14 = tl_math.exp(tmp13) tmp15 = tmp14 / tmp5 tmp16 = tmp11 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = _tmp18 + tmp17 _tmp18 = tl.where(rmask & xmask, tmp19, _tmp18) tmp22 = tmp21 - tmp2 tmp23 = tl_math.exp(tmp22) tmp24 = tmp23 / tmp5 tmp25 = tmp20 * tmp24 tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = _tmp27 + tmp26 _tmp27 = tl.where(rmask & xmask, tmp28, _tmp27) tmp31 = tmp30 - tmp2 tmp32 = tl_math.exp(tmp31) tmp33 = tmp32 / tmp5 tmp34 = tmp29 * tmp33 tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = _tmp36 + tmp35 _tmp36 = tl.where(rmask & xmask, tmp37, _tmp36) tmp40 = tmp39 - tmp2 tmp41 = tl_math.exp(tmp40) tmp42 = tmp41 / tmp5 tmp43 = tmp38 * tmp42 tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK]) tmp46 = _tmp45 + tmp44 _tmp45 = tl.where(rmask & xmask, tmp46, _tmp45) tmp49 = tmp48 - tmp2 tmp50 = tl_math.exp(tmp49) tmp51 = tmp50 / tmp5 tmp52 = tmp47 * tmp51 tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK]) tmp55 = _tmp54 + tmp53 _tmp54 = tl.where(rmask & xmask, tmp55, _tmp54) tmp58 = tmp57 - tmp2 tmp59 = tl_math.exp(tmp58) tmp60 = tmp59 / tmp5 tmp61 = tmp56 * tmp60 tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK]) tmp64 = _tmp63 + tmp62 _tmp63 = tl.where(rmask & xmask, tmp64, _tmp63) tmp67 = tmp66 - tmp2 tmp68 = tl_math.exp(tmp67) tmp69 = tmp68 / tmp5 tmp70 = tmp65 * tmp69 tmp71 = tl.broadcast_to(tmp70, [XBLOCK, RBLOCK]) tmp73 = _tmp72 + tmp71 _tmp72 = tl.where(rmask & xmask, tmp73, _tmp72) tmp76 = tmp75 - tmp2 tmp77 = tl_math.exp(tmp76) tmp78 = tmp77 / tmp5 tmp79 = tmp74 * tmp78 tmp80 = tl.broadcast_to(tmp79, [XBLOCK, RBLOCK]) tmp82 = _tmp81 + tmp80 _tmp81 = tl.where(rmask & xmask, tmp82, _tmp81) tmp85 = tmp84 - tmp2 tmp86 = tl_math.exp(tmp85) tmp87 = tmp86 / tmp5 tmp88 = tmp83 * tmp87 tmp89 = tl.broadcast_to(tmp88, [XBLOCK, RBLOCK]) tmp91 = _tmp90 + tmp89 _tmp90 = tl.where(rmask & xmask, tmp91, _tmp90) tmp94 = tmp93 - tmp2 tmp95 = tl_math.exp(tmp94) tmp96 = tmp95 / tmp5 tmp97 = tmp92 * tmp96 tmp98 = 
tl.broadcast_to(tmp97, [XBLOCK, RBLOCK]) tmp100 = _tmp99 + tmp98 _tmp99 = tl.where(rmask & xmask, tmp100, _tmp99) tmp103 = tmp102 - tmp2 tmp104 = tl_math.exp(tmp103) tmp105 = tmp104 / tmp5 tmp106 = tmp101 * tmp105 tmp107 = tl.broadcast_to(tmp106, [XBLOCK, RBLOCK]) tmp109 = _tmp108 + tmp107 _tmp108 = tl.where(rmask & xmask, tmp109, _tmp108) tmp112 = tmp111 - tmp2 tmp113 = tl_math.exp(tmp112) tmp114 = tmp113 / tmp5 tmp115 = tmp110 * tmp114 tmp116 = tl.broadcast_to(tmp115, [XBLOCK, RBLOCK]) tmp118 = _tmp117 + tmp116 _tmp117 = tl.where(rmask & xmask, tmp118, _tmp117) tmp121 = tmp120 - tmp2 tmp122 = tl_math.exp(tmp121) tmp123 = tmp122 / tmp5 tmp124 = tmp119 * tmp123 tmp125 = tl.broadcast_to(tmp124, [XBLOCK, RBLOCK]) tmp127 = _tmp126 + tmp125 _tmp126 = tl.where(rmask & xmask, tmp127, _tmp126) tmp130 = tmp129 - tmp2 tmp131 = tl_math.exp(tmp130) tmp132 = tmp131 / tmp5 tmp133 = tmp128 * tmp132 tmp134 = tl.broadcast_to(tmp133, [XBLOCK, RBLOCK]) tmp136 = _tmp135 + tmp134 _tmp135 = tl.where(rmask & xmask, tmp136, _tmp135) tmp139 = tmp138 - tmp2 tmp140 = tl_math.exp(tmp139) tmp141 = tmp140 / tmp5 tmp142 = tmp137 * tmp141 tmp143 = tl.broadcast_to(tmp142, [XBLOCK, RBLOCK]) tmp145 = _tmp144 + tmp143 _tmp144 = tl.where(rmask & xmask, tmp145, _tmp144) tmp148 = tmp147 - tmp2 tmp149 = tl_math.exp(tmp148) tmp150 = tmp149 / tmp5 tmp151 = tmp146 * tmp150 tmp152 = tl.broadcast_to(tmp151, [XBLOCK, RBLOCK]) tmp154 = _tmp153 + tmp152 _tmp153 = tl.where(rmask & xmask, tmp154, _tmp153) tmp157 = tmp156 - tmp2 tmp158 = tl_math.exp(tmp157) tmp159 = tmp158 / tmp5 tmp160 = tmp155 * tmp159 tmp161 = tl.broadcast_to(tmp160, [XBLOCK, RBLOCK]) tmp163 = _tmp162 + tmp161 _tmp162 = tl.where(rmask & xmask, tmp163, _tmp162) tmp166 = tmp165 - tmp2 tmp167 = tl_math.exp(tmp166) tmp168 = tmp167 / tmp5 tmp169 = tmp164 * tmp168 tmp170 = tl.broadcast_to(tmp169, [XBLOCK, RBLOCK]) tmp172 = _tmp171 + tmp170 _tmp171 = tl.where(rmask & xmask, tmp172, _tmp171) tmp175 = tmp174 - tmp2 tmp176 = tl_math.exp(tmp175) tmp177 = tmp176 / tmp5 tmp178 = tmp173 * tmp177 tmp179 = tl.broadcast_to(tmp178, [XBLOCK, RBLOCK]) tmp181 = _tmp180 + tmp179 _tmp180 = tl.where(rmask & xmask, tmp181, _tmp180) tmp184 = tmp183 - tmp2 tmp185 = tl_math.exp(tmp184) tmp186 = tmp185 / tmp5 tmp187 = tmp182 * tmp186 tmp188 = tl.broadcast_to(tmp187, [XBLOCK, RBLOCK]) tmp190 = _tmp189 + tmp188 _tmp189 = tl.where(rmask & xmask, tmp190, _tmp189) tmp193 = tmp192 - tmp2 tmp194 = tl_math.exp(tmp193) tmp195 = tmp194 / tmp5 tmp196 = tmp191 * tmp195 tmp197 = tl.broadcast_to(tmp196, [XBLOCK, RBLOCK]) tmp199 = _tmp198 + tmp197 _tmp198 = tl.where(rmask & xmask, tmp199, _tmp198) tmp202 = tmp201 - tmp2 tmp203 = tl_math.exp(tmp202) tmp204 = tmp203 / tmp5 tmp205 = tmp200 * tmp204 tmp206 = tl.broadcast_to(tmp205, [XBLOCK, RBLOCK]) tmp208 = _tmp207 + tmp206 _tmp207 = tl.where(rmask & xmask, tmp208, _tmp207) tmp211 = tmp210 - tmp2 tmp212 = tl_math.exp(tmp211) tmp213 = tmp212 / tmp5 tmp214 = tmp209 * tmp213 tmp215 = tl.broadcast_to(tmp214, [XBLOCK, RBLOCK]) tmp217 = _tmp216 + tmp215 _tmp216 = tl.where(rmask & xmask, tmp217, _tmp216) tmp220 = tmp219 - tmp2 tmp221 = tl_math.exp(tmp220) tmp222 = tmp221 / tmp5 tmp223 = tmp218 * tmp222 tmp224 = tl.broadcast_to(tmp223, [XBLOCK, RBLOCK]) tmp226 = _tmp225 + tmp224 _tmp225 = tl.where(rmask & xmask, tmp226, _tmp225) tmp229 = tmp228 - tmp2 tmp230 = tl_math.exp(tmp229) tmp231 = tmp230 / tmp5 tmp232 = tmp227 * tmp231 tmp233 = tl.broadcast_to(tmp232, [XBLOCK, RBLOCK]) tmp235 = _tmp234 + tmp233 _tmp234 = tl.where(rmask & xmask, tmp235, _tmp234) tmp238 = tmp237 - 
tmp2 tmp239 = tl_math.exp(tmp238) tmp240 = tmp239 / tmp5 tmp241 = tmp236 * tmp240 tmp242 = tl.broadcast_to(tmp241, [XBLOCK, RBLOCK]) tmp244 = _tmp243 + tmp242 _tmp243 = tl.where(rmask & xmask, tmp244, _tmp243) tmp247 = tmp246 - tmp2 tmp248 = tl_math.exp(tmp247) tmp249 = tmp248 / tmp5 tmp250 = tmp245 * tmp249 tmp251 = tl.broadcast_to(tmp250, [XBLOCK, RBLOCK]) tmp253 = _tmp252 + tmp251 _tmp252 = tl.where(rmask & xmask, tmp253, _tmp252) tmp9 = tl.sum(_tmp9, 1)[:, None] tl.store(out_ptr0 + (x3), tmp9, xmask) tmp18 = tl.sum(_tmp18, 1)[:, None] tl.store(out_ptr1 + (x3), tmp18, xmask) tmp27 = tl.sum(_tmp27, 1)[:, None] tl.store(out_ptr2 + (x3), tmp27, xmask) tmp36 = tl.sum(_tmp36, 1)[:, None] tl.store(out_ptr3 + (x3), tmp36, xmask) tmp45 = tl.sum(_tmp45, 1)[:, None] tl.store(out_ptr4 + (x3), tmp45, xmask) tmp54 = tl.sum(_tmp54, 1)[:, None] tl.store(out_ptr5 + (x3), tmp54, xmask) tmp63 = tl.sum(_tmp63, 1)[:, None] tl.store(out_ptr6 + (x3), tmp63, xmask) tmp72 = tl.sum(_tmp72, 1)[:, None] tl.store(out_ptr7 + (x3), tmp72, xmask) tmp81 = tl.sum(_tmp81, 1)[:, None] tl.store(out_ptr8 + (x3), tmp81, xmask) tmp90 = tl.sum(_tmp90, 1)[:, None] tl.store(out_ptr9 + (x3), tmp90, xmask) tmp99 = tl.sum(_tmp99, 1)[:, None] tl.store(out_ptr10 + (x3), tmp99, xmask) tmp108 = tl.sum(_tmp108, 1)[:, None] tl.store(out_ptr11 + (x3), tmp108, xmask) tmp117 = tl.sum(_tmp117, 1)[:, None] tl.store(out_ptr12 + (x3), tmp117, xmask) tmp126 = tl.sum(_tmp126, 1)[:, None] tl.store(out_ptr13 + (x3), tmp126, xmask) tmp135 = tl.sum(_tmp135, 1)[:, None] tl.store(out_ptr14 + (x3), tmp135, xmask) tmp144 = tl.sum(_tmp144, 1)[:, None] tl.store(out_ptr15 + (x3), tmp144, xmask) tmp153 = tl.sum(_tmp153, 1)[:, None] tl.store(out_ptr16 + (x3), tmp153, xmask) tmp162 = tl.sum(_tmp162, 1)[:, None] tl.store(out_ptr17 + (x3), tmp162, xmask) tmp171 = tl.sum(_tmp171, 1)[:, None] tl.store(out_ptr18 + (x3), tmp171, xmask) tmp180 = tl.sum(_tmp180, 1)[:, None] tl.store(out_ptr19 + (x3), tmp180, xmask) tmp189 = tl.sum(_tmp189, 1)[:, None] tl.store(out_ptr20 + (x3), tmp189, xmask) tmp198 = tl.sum(_tmp198, 1)[:, None] tl.store(out_ptr21 + (x3), tmp198, xmask) tmp207 = tl.sum(_tmp207, 1)[:, None] tl.store(out_ptr22 + (x3), tmp207, xmask) tmp216 = tl.sum(_tmp216, 1)[:, None] tl.store(out_ptr23 + (x3), tmp216, xmask) tmp225 = tl.sum(_tmp225, 1)[:, None] tl.store(out_ptr24 + (x3), tmp225, xmask) tmp234 = tl.sum(_tmp234, 1)[:, None] tl.store(out_ptr25 + (x3), tmp234, xmask) tmp243 = tl.sum(_tmp243, 1)[:, None] tl.store(out_ptr26 + (x3), tmp243, xmask) tmp252 = tl.sum(_tmp252, 1)[:, None] tl.store(out_ptr27 + (x3), tmp252, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/m5/cm5hatbwewijgqsezz7mpghb6gtaqevomtlc673msign42fqnq42.py # Topologically Sorted Source Nodes: [residual_115, sum_58, residual_117, sum_59, residual_119, sum_60, residual_121, sum_61, residual_123, sum_62, residual_125, sum_63, residual_127, sum_64], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # residual_115 => mul_57 # residual_117 => mul_58 # residual_119 => mul_59 # residual_121 => mul_60 # residual_123 => mul_61 # residual_125 => mul_62 # residual_127 => mul_63 # sum_58 => sum_60 # sum_59 => sum_61 # sum_60 => sum_62 # sum_61 => sum_63 # sum_62 => sum_64 # sum_63 => sum_65 # sum_64 => sum_66 # Graph fragment: # %mul_57 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_58, %unsqueeze_173), kwargs = {}) # %sum_60 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_57, [-1]), 
kwargs = {}) # %mul_58 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_59, %unsqueeze_176), kwargs = {}) # %sum_61 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_58, [-1]), kwargs = {}) # %mul_59 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_60, %unsqueeze_179), kwargs = {}) # %sum_62 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_59, [-1]), kwargs = {}) # %mul_60 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_61, %unsqueeze_182), kwargs = {}) # %sum_63 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_60, [-1]), kwargs = {}) # %mul_61 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_62, %unsqueeze_185), kwargs = {}) # %sum_64 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_61, [-1]), kwargs = {}) # %mul_62 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_63, %unsqueeze_188), kwargs = {}) # %sum_65 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_62, [-1]), kwargs = {}) # %mul_63 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_64, %unsqueeze_191), kwargs = {}) # %sum_66 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_63, [-1]), kwargs = {}) triton_red_fused_mul_sum_5 = async_compile.triton('triton_red_fused_mul_sum_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[512, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: 'i32', 18: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mul_sum_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 7, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_mul_sum_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 512 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x1 = (xindex // 128) _tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr1 + (233472 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr2 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tl.load(in_ptr3 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr4 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp12 = tl.load(in_ptr1 + (237568 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr5 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp21 = tl.load(in_ptr1 + (241664 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tl.load(in_ptr6 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp30 = tl.load(in_ptr1 + (245760 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp38 = tl.load(in_ptr7 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp39 = tl.load(in_ptr1 + (249856 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tl.load(in_ptr8 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp48 = tl.load(in_ptr1 + (253952 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp56 = tl.load(in_ptr9 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp57 = tl.load(in_ptr1 + (258048 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp3 = tmp1 - tmp2 tmp4 = tl_math.exp(tmp3) tmp6 = tmp4 / tmp5 tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = _tmp9 + tmp8 _tmp9 = tl.where(rmask & xmask, tmp10, _tmp9) tmp13 = tmp12 - tmp2 tmp14 = tl_math.exp(tmp13) tmp15 = tmp14 / tmp5 tmp16 = tmp11 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = _tmp18 + tmp17 _tmp18 = tl.where(rmask & xmask, tmp19, _tmp18) tmp22 = tmp21 - tmp2 tmp23 = tl_math.exp(tmp22) tmp24 = tmp23 / tmp5 tmp25 = tmp20 * tmp24 tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = _tmp27 + tmp26 _tmp27 = tl.where(rmask & xmask, tmp28, _tmp27) tmp31 = tmp30 - tmp2 tmp32 = tl_math.exp(tmp31) tmp33 = tmp32 / tmp5 tmp34 = tmp29 * tmp33 tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = _tmp36 + tmp35 _tmp36 = tl.where(rmask & xmask, tmp37, _tmp36) tmp40 = tmp39 - tmp2 tmp41 = tl_math.exp(tmp40) tmp42 = tmp41 / tmp5 tmp43 = tmp38 * tmp42 tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK]) tmp46 = _tmp45 + tmp44 _tmp45 = tl.where(rmask & xmask, tmp46, _tmp45) tmp49 = tmp48 - tmp2 tmp50 = tl_math.exp(tmp49) tmp51 = tmp50 / tmp5 tmp52 = tmp47 * tmp51 tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK]) tmp55 = _tmp54 + tmp53 _tmp54 = tl.where(rmask & xmask, tmp55, _tmp54) tmp58 = tmp57 - tmp2 tmp59 = tl_math.exp(tmp58) 
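        # Same stable-softmax step as the blocks above: tmpN - tmp2 subtracts the
        # per-position max logit (in_ptr2), tl_math.exp exponentiates, and the
        # division by tmp5 (the precomputed exp-sum over all 64 clusters, in_ptr3)
        # yields the soft-assignment weight for this cluster before the masked
        # running sum into its _tmp accumulator.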
tmp60 = tmp59 / tmp5 tmp61 = tmp56 * tmp60 tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK]) tmp64 = _tmp63 + tmp62 _tmp63 = tl.where(rmask & xmask, tmp64, _tmp63) tmp9 = tl.sum(_tmp9, 1)[:, None] tl.store(out_ptr0 + (x3), tmp9, xmask) tmp18 = tl.sum(_tmp18, 1)[:, None] tl.store(out_ptr1 + (x3), tmp18, xmask) tmp27 = tl.sum(_tmp27, 1)[:, None] tl.store(out_ptr2 + (x3), tmp27, xmask) tmp36 = tl.sum(_tmp36, 1)[:, None] tl.store(out_ptr3 + (x3), tmp36, xmask) tmp45 = tl.sum(_tmp45, 1)[:, None] tl.store(out_ptr4 + (x3), tmp45, xmask) tmp54 = tl.sum(_tmp54, 1)[:, None] tl.store(out_ptr5 + (x3), tmp54, xmask) tmp63 = tl.sum(_tmp63, 1)[:, None] tl.store(out_ptr6 + (x3), tmp63, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ew/cewrqjskzvue6r2kcna4men3gingd3ajhrksiyyptwu2fliqalf7.py # Topologically Sorted Source Nodes: [vlad, setitem, setitem_1, setitem_2, setitem_3, setitem_4, setitem_5, setitem_6, setitem_7, setitem_8, setitem_9, setitem_10, setitem_11, setitem_12, setitem_13, setitem_14, setitem_15, setitem_16, setitem_17, setitem_18, setitem_19, setitem_20, setitem_21, setitem_22, setitem_23, setitem_24, setitem_25, setitem_26, setitem_27, setitem_28, setitem_29, setitem_30, setitem_31, setitem_32, setitem_33, setitem_34, setitem_35, setitem_36, setitem_37, setitem_38, setitem_39, setitem_40, setitem_41, setitem_42, setitem_43, setitem_44, setitem_45, setitem_46, setitem_47, setitem_48, setitem_49, setitem_50, setitem_51, setitem_52, setitem_53, setitem_54, setitem_55, setitem_56, setitem_57, setitem_58, setitem_59, setitem_60, setitem_61, setitem_62, setitem_63, vlad_1], Original ATen: [aten.zeros, aten.copy, aten.linalg_vector_norm] # Source node to ATen node mapping: # setitem => copy # setitem_1 => copy_1 # setitem_10 => copy_10 # setitem_11 => copy_11 # setitem_12 => copy_12 # setitem_13 => copy_13 # setitem_14 => copy_14 # setitem_15 => copy_15 # setitem_16 => copy_16 # setitem_17 => copy_17 # setitem_18 => copy_18 # setitem_19 => copy_19 # setitem_2 => copy_2 # setitem_20 => copy_20 # setitem_21 => copy_21 # setitem_22 => copy_22 # setitem_23 => copy_23 # setitem_24 => copy_24 # setitem_25 => copy_25 # setitem_26 => copy_26 # setitem_27 => copy_27 # setitem_28 => copy_28 # setitem_29 => copy_29 # setitem_3 => copy_3 # setitem_30 => copy_30 # setitem_31 => copy_31 # setitem_32 => copy_32 # setitem_33 => copy_33 # setitem_34 => copy_34 # setitem_35 => copy_35 # setitem_36 => copy_36 # setitem_37 => copy_37 # setitem_38 => copy_38 # setitem_39 => copy_39 # setitem_4 => copy_4 # setitem_40 => copy_40 # setitem_41 => copy_41 # setitem_42 => copy_42 # setitem_43 => copy_43 # setitem_44 => copy_44 # setitem_45 => copy_45 # setitem_46 => copy_46 # setitem_47 => copy_47 # setitem_48 => copy_48 # setitem_49 => copy_49 # setitem_5 => copy_5 # setitem_50 => copy_50 # setitem_51 => copy_51 # setitem_52 => copy_52 # setitem_53 => copy_53 # setitem_54 => copy_54 # setitem_55 => copy_55 # setitem_56 => copy_56 # setitem_57 => copy_57 # setitem_58 => copy_58 # setitem_59 => copy_59 # setitem_6 => copy_6 # setitem_60 => copy_60 # setitem_61 => copy_61 # setitem_62 => copy_62 # setitem_63 => copy_63 # setitem_7 => copy_7 # setitem_8 => copy_8 # setitem_9 => copy_9 # vlad => full # vlad_1 => pow_3, pow_4, sum_67 # Graph fragment: # %full : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 64, 128], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %copy : [num_users=1] = 
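# NOTE (reference sketch): the zeros/copy/linalg_vector_norm graph annotated here
# assembles the 64 per-cluster (B, D) sums into a (B, K, D) VLAD matrix via 64
# slice_scatter copies into a zero-initialized [4, 64, 128] buffer, then takes the
# L2 norm over the descriptor dimension (pow(2) -> sum over dim [2], keepdim ->
# sqrt). A compact PyTorch equivalent follows; the helper name and the final
# division (the conventional NetVLAD intra-normalization that consumes the norm
# downstream) are assumptions, not the generated code.
def _reference_vlad_assemble(cluster_sums):
    # cluster_sums: list of K tensors, each (B, D) -- one per reduction-kernel output.
    vlad = torch.stack(cluster_sums, dim=1)             # the 64 setitem_* slice_scatters -> (B, K, D)
    norm = vlad.pow(2).sum(dim=2, keepdim=True).sqrt()  # aten.linalg_vector_norm over dim [2]
    return vlad / norm                                  # intra-normalization (division assumed downstream)
#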
call_function[target=torch.ops.aten.copy.default](args = (%slice_7, %sum_3), kwargs = {}) # %slice_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%full, %copy, 1, 0, 1), kwargs = {}) # %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_26, %sum_4), kwargs = {}) # %slice_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default, %copy_1, 1, 1, 2), kwargs = {}) # %copy_2 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_45, %sum_5), kwargs = {}) # %slice_scatter_default_2 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %copy_2, 1, 2, 3), kwargs = {}) # %copy_3 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_64, %sum_6), kwargs = {}) # %slice_scatter_default_3 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_2, %copy_3, 1, 3, 4), kwargs = {}) # %copy_4 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_83, %sum_7), kwargs = {}) # %slice_scatter_default_4 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_3, %copy_4, 1, 4, 5), kwargs = {}) # %copy_5 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_102, %sum_8), kwargs = {}) # %slice_scatter_default_5 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_4, %copy_5, 1, 5, 6), kwargs = {}) # %copy_6 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_121, %sum_9), kwargs = {}) # %slice_scatter_default_6 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_5, %copy_6, 1, 6, 7), kwargs = {}) # %copy_7 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_140, %sum_10), kwargs = {}) # %slice_scatter_default_7 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_6, %copy_7, 1, 7, 8), kwargs = {}) # %copy_8 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_159, %sum_11), kwargs = {}) # %slice_scatter_default_8 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_7, %copy_8, 1, 8, 9), kwargs = {}) # %copy_9 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_178, %sum_12), kwargs = {}) # %slice_scatter_default_9 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_8, %copy_9, 1, 9, 10), kwargs = {}) # %copy_10 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_197, %sum_13), kwargs = {}) # %slice_scatter_default_10 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_9, %copy_10, 1, 10, 11), kwargs = {}) # %copy_11 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_216, %sum_14), kwargs = {}) # %slice_scatter_default_11 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_10, %copy_11, 1, 11, 12), kwargs = {}) # %copy_12 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_235, %sum_15), kwargs 
= {}) # %slice_scatter_default_12 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_11, %copy_12, 1, 12, 13), kwargs = {}) # %copy_13 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_254, %sum_16), kwargs = {}) # %slice_scatter_default_13 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_12, %copy_13, 1, 13, 14), kwargs = {}) # %copy_14 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_273, %sum_17), kwargs = {}) # %slice_scatter_default_14 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_13, %copy_14, 1, 14, 15), kwargs = {}) # %copy_15 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_292, %sum_18), kwargs = {}) # %slice_scatter_default_15 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_14, %copy_15, 1, 15, 16), kwargs = {}) # %copy_16 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_311, %sum_19), kwargs = {}) # %slice_scatter_default_16 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_15, %copy_16, 1, 16, 17), kwargs = {}) # %copy_17 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_330, %sum_20), kwargs = {}) # %slice_scatter_default_17 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_16, %copy_17, 1, 17, 18), kwargs = {}) # %copy_18 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_349, %sum_21), kwargs = {}) # %slice_scatter_default_18 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_17, %copy_18, 1, 18, 19), kwargs = {}) # %copy_19 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_368, %sum_22), kwargs = {}) # %slice_scatter_default_19 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_18, %copy_19, 1, 19, 20), kwargs = {}) # %copy_20 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_387, %sum_23), kwargs = {}) # %slice_scatter_default_20 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_19, %copy_20, 1, 20, 21), kwargs = {}) # %copy_21 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_406, %sum_24), kwargs = {}) # %slice_scatter_default_21 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_20, %copy_21, 1, 21, 22), kwargs = {}) # %copy_22 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_425, %sum_25), kwargs = {}) # %slice_scatter_default_22 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_21, %copy_22, 1, 22, 23), kwargs = {}) # %copy_23 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_444, %sum_26), kwargs = {}) # %slice_scatter_default_23 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_22, %copy_23, 1, 23, 24), kwargs = {}) # %copy_24 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_463, 
%sum_27), kwargs = {}) # %slice_scatter_default_24 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_23, %copy_24, 1, 24, 25), kwargs = {}) # %copy_25 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_482, %sum_28), kwargs = {}) # %slice_scatter_default_25 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_24, %copy_25, 1, 25, 26), kwargs = {}) # %copy_26 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_501, %sum_29), kwargs = {}) # %slice_scatter_default_26 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_25, %copy_26, 1, 26, 27), kwargs = {}) # %copy_27 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_520, %sum_30), kwargs = {}) # %slice_scatter_default_27 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_26, %copy_27, 1, 27, 28), kwargs = {}) # %copy_28 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_539, %sum_31), kwargs = {}) # %slice_scatter_default_28 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_27, %copy_28, 1, 28, 29), kwargs = {}) # %copy_29 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_558, %sum_32), kwargs = {}) # %slice_scatter_default_29 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_28, %copy_29, 1, 29, 30), kwargs = {}) # %copy_30 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_577, %sum_33), kwargs = {}) # %slice_scatter_default_30 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_29, %copy_30, 1, 30, 31), kwargs = {}) # %copy_31 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_596, %sum_34), kwargs = {}) # %slice_scatter_default_31 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_30, %copy_31, 1, 31, 32), kwargs = {}) # %copy_32 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_615, %sum_35), kwargs = {}) # %slice_scatter_default_32 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_31, %copy_32, 1, 32, 33), kwargs = {}) # %copy_33 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_634, %sum_36), kwargs = {}) # %slice_scatter_default_33 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_32, %copy_33, 1, 33, 34), kwargs = {}) # %copy_34 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_653, %sum_37), kwargs = {}) # %slice_scatter_default_34 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_33, %copy_34, 1, 34, 35), kwargs = {}) # %copy_35 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_672, %sum_38), kwargs = {}) # %slice_scatter_default_35 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_34, %copy_35, 1, 35, 36), kwargs = {}) # %copy_36 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = 
(%slice_691, %sum_39), kwargs = {}) # %slice_scatter_default_36 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_35, %copy_36, 1, 36, 37), kwargs = {}) # %copy_37 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_710, %sum_40), kwargs = {}) # %slice_scatter_default_37 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_36, %copy_37, 1, 37, 38), kwargs = {}) # %copy_38 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_729, %sum_41), kwargs = {}) # %slice_scatter_default_38 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_37, %copy_38, 1, 38, 39), kwargs = {}) # %copy_39 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_748, %sum_42), kwargs = {}) # %slice_scatter_default_39 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_38, %copy_39, 1, 39, 40), kwargs = {}) # %copy_40 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_767, %sum_43), kwargs = {}) # %slice_scatter_default_40 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_39, %copy_40, 1, 40, 41), kwargs = {}) # %copy_41 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_786, %sum_44), kwargs = {}) # %slice_scatter_default_41 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_40, %copy_41, 1, 41, 42), kwargs = {}) # %copy_42 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_805, %sum_45), kwargs = {}) # %slice_scatter_default_42 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_41, %copy_42, 1, 42, 43), kwargs = {}) # %copy_43 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_824, %sum_46), kwargs = {}) # %slice_scatter_default_43 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_42, %copy_43, 1, 43, 44), kwargs = {}) # %copy_44 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_843, %sum_47), kwargs = {}) # %slice_scatter_default_44 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_43, %copy_44, 1, 44, 45), kwargs = {}) # %copy_45 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_862, %sum_48), kwargs = {}) # %slice_scatter_default_45 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_44, %copy_45, 1, 45, 46), kwargs = {}) # %copy_46 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_881, %sum_49), kwargs = {}) # %slice_scatter_default_46 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_45, %copy_46, 1, 46, 47), kwargs = {}) # %copy_47 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_900, %sum_50), kwargs = {}) # %slice_scatter_default_47 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_46, %copy_47, 1, 47, 48), kwargs = {}) # %copy_48 : [num_users=1] = 
call_function[target=torch.ops.aten.copy.default](args = (%slice_919, %sum_51), kwargs = {}) # %slice_scatter_default_48 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_47, %copy_48, 1, 48, 49), kwargs = {}) # %copy_49 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_938, %sum_52), kwargs = {}) # %slice_scatter_default_49 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_48, %copy_49, 1, 49, 50), kwargs = {}) # %copy_50 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_957, %sum_53), kwargs = {}) # %slice_scatter_default_50 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_49, %copy_50, 1, 50, 51), kwargs = {}) # %copy_51 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_976, %sum_54), kwargs = {}) # %slice_scatter_default_51 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_50, %copy_51, 1, 51, 52), kwargs = {}) # %copy_52 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_995, %sum_55), kwargs = {}) # %slice_scatter_default_52 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_51, %copy_52, 1, 52, 53), kwargs = {}) # %copy_53 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1014, %sum_56), kwargs = {}) # %slice_scatter_default_53 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_52, %copy_53, 1, 53, 54), kwargs = {}) # %copy_54 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1033, %sum_57), kwargs = {}) # %slice_scatter_default_54 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_53, %copy_54, 1, 54, 55), kwargs = {}) # %copy_55 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1052, %sum_58), kwargs = {}) # %slice_scatter_default_55 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_54, %copy_55, 1, 55, 56), kwargs = {}) # %copy_56 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1071, %sum_59), kwargs = {}) # %slice_scatter_default_56 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_55, %copy_56, 1, 56, 57), kwargs = {}) # %copy_57 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1090, %sum_60), kwargs = {}) # %slice_scatter_default_57 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_56, %copy_57, 1, 57, 58), kwargs = {}) # %copy_58 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1109, %sum_61), kwargs = {}) # %slice_scatter_default_58 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_57, %copy_58, 1, 58, 59), kwargs = {}) # %copy_59 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1128, %sum_62), kwargs = {}) # %slice_scatter_default_59 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_58, %copy_59, 1, 59, 60), kwargs = {}) # %copy_60 : 
[num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1147, %sum_63), kwargs = {}) # %slice_scatter_default_60 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_59, %copy_60, 1, 60, 61), kwargs = {}) # %copy_61 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1166, %sum_64), kwargs = {}) # %slice_scatter_default_61 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_60, %copy_61, 1, 61, 62), kwargs = {}) # %copy_62 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1185, %sum_65), kwargs = {}) # %slice_scatter_default_62 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_61, %copy_62, 1, 62, 63), kwargs = {}) # %copy_63 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1204, %sum_66), kwargs = {}) # %slice_scatter_default_63 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_62, %copy_63, 1, 63, 64), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%slice_scatter_default_63, 2), kwargs = {}) # %sum_67 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [2], True), kwargs = {}) # %pow_4 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_67, 0.5), kwargs = {}) triton_per_fused_copy_linalg_vector_norm_zeros_6 = async_compile.triton('triton_per_fused_copy_linalg_vector_norm_zeros_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[256, 128], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: '*fp32', 34: '*fp32', 35: '*fp32', 36: '*fp32', 37: '*fp32', 38: '*fp32', 39: '*fp32', 40: '*fp32', 41: '*fp32', 42: '*fp32', 43: '*fp32', 44: '*fp32', 45: '*fp32', 46: '*fp32', 47: '*fp32', 48: '*fp32', 49: '*fp32', 50: '*fp32', 51: '*fp32', 52: '*fp32', 53: '*fp32', 54: '*fp32', 55: '*fp32', 56: '*fp32', 57: '*fp32', 58: '*fp32', 59: '*fp32', 60: '*fp32', 61: '*fp32', 62: '*fp32', 63: '*fp32', 64: '*fp32', 65: '*fp32', 66: 'i32', 67: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_copy_linalg_vector_norm_zeros_6', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 64, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_copy_linalg_vector_norm_zeros_6(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, in_ptr32, in_ptr33, in_ptr34, in_ptr35, in_ptr36, in_ptr37, in_ptr38, in_ptr39, in_ptr40, in_ptr41, in_ptr42, in_ptr43, in_ptr44, in_ptr45, in_ptr46, in_ptr47, in_ptr48, in_ptr49, in_ptr50, in_ptr51, in_ptr52, in_ptr53, in_ptr54, in_ptr55, in_ptr56, in_ptr57, in_ptr58, in_ptr59, in_ptr60, in_ptr61, in_ptr62, in_ptr63, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 256 rnumel = 128 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex % 64 r2 = rindex x1 = (xindex // 64) x3 = xindex tmp0 = x0 tmp1 = tl.full([1, 1], 4, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1, 1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (r2 + (128*x1)), tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tl.full([1, 1], 3, tl.int64) tmp8 = tmp0 >= tmp7 tmp9 = tmp0 < tmp1 tmp10 = tmp8 & tmp9 tmp11 = tl.load(in_ptr1 + (r2 + (128*x1)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.full([1, 1], 2, tl.int64) tmp13 = tmp0 >= tmp12 tmp14 = tmp0 < tmp7 tmp15 = tmp13 & tmp14 tmp16 = tl.load(in_ptr2 + (r2 + (128*x1)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tl.full([1, 1], 1, tl.int64) tmp18 = tmp0 >= tmp17 tmp19 = tmp0 < tmp12 tmp20 = tmp18 & tmp19 tmp21 = tl.load(in_ptr3 + (r2 + (128*x1)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tmp0 < tmp17 tmp23 = tl.load(in_ptr4 + (r2 + (128*x1)), tmp22 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = 0.0 tmp25 = tl.where(tmp22, tmp23, tmp24) tmp26 = tl.where(tmp20, tmp21, tmp25) tmp27 = tl.where(tmp15, tmp16, tmp26) tmp28 = tl.where(tmp10, tmp11, tmp27) tmp29 = tl.where(tmp5, tmp6, tmp28) tmp30 = tl.full([1, 1], 8, tl.int64) tmp31 = tmp0 >= tmp30 tmp32 = tl.full([1, 1], 9, tl.int64) tmp33 = tmp0 < tmp32 tmp34 = tmp31 & tmp33 tmp35 = tl.load(in_ptr5 + (r2 + (128*x1)), tmp34 & xmask, eviction_policy='evict_last', other=0.0) tmp36 = tl.full([1, 1], 7, tl.int64) tmp37 = tmp0 >= tmp36 tmp38 = tmp0 < tmp30 tmp39 = tmp37 & tmp38 tmp40 = tl.load(in_ptr6 + (r2 + (128*x1)), tmp39 & xmask, eviction_policy='evict_last', other=0.0) tmp41 = tl.full([1, 1], 6, tl.int64) tmp42 = tmp0 >= tmp41 tmp43 = tmp0 < tmp36 tmp44 = tmp42 & tmp43 tmp45 = tl.load(in_ptr7 + (r2 + (128*x1)), tmp44 & xmask, eviction_policy='evict_last', 
other=0.0) tmp46 = tmp0 >= tmp3 tmp47 = tmp0 < tmp41 tmp48 = tmp46 & tmp47 tmp49 = tl.load(in_ptr8 + (r2 + (128*x1)), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.where(tmp48, tmp49, tmp29) tmp51 = tl.where(tmp44, tmp45, tmp50) tmp52 = tl.where(tmp39, tmp40, tmp51) tmp53 = tl.where(tmp34, tmp35, tmp52) tmp54 = tl.full([1, 1], 12, tl.int64) tmp55 = tmp0 >= tmp54 tmp56 = tl.full([1, 1], 13, tl.int64) tmp57 = tmp0 < tmp56 tmp58 = tmp55 & tmp57 tmp59 = tl.load(in_ptr9 + (r2 + (128*x1)), tmp58 & xmask, eviction_policy='evict_last', other=0.0) tmp60 = tl.full([1, 1], 11, tl.int64) tmp61 = tmp0 >= tmp60 tmp62 = tmp0 < tmp54 tmp63 = tmp61 & tmp62 tmp64 = tl.load(in_ptr10 + (r2 + (128*x1)), tmp63 & xmask, eviction_policy='evict_last', other=0.0) tmp65 = tl.full([1, 1], 10, tl.int64) tmp66 = tmp0 >= tmp65 tmp67 = tmp0 < tmp60 tmp68 = tmp66 & tmp67 tmp69 = tl.load(in_ptr11 + (r2 + (128*x1)), tmp68 & xmask, eviction_policy='evict_last', other=0.0) tmp70 = tmp0 >= tmp32 tmp71 = tmp0 < tmp65 tmp72 = tmp70 & tmp71 tmp73 = tl.load(in_ptr12 + (r2 + (128*x1)), tmp72 & xmask, eviction_policy='evict_last', other=0.0) tmp74 = tl.where(tmp72, tmp73, tmp53) tmp75 = tl.where(tmp68, tmp69, tmp74) tmp76 = tl.where(tmp63, tmp64, tmp75) tmp77 = tl.where(tmp58, tmp59, tmp76) tmp78 = tl.full([1, 1], 16, tl.int64) tmp79 = tmp0 >= tmp78 tmp80 = tl.full([1, 1], 17, tl.int64) tmp81 = tmp0 < tmp80 tmp82 = tmp79 & tmp81 tmp83 = tl.load(in_ptr13 + (r2 + (128*x1)), tmp82 & xmask, eviction_policy='evict_last', other=0.0) tmp84 = tl.full([1, 1], 15, tl.int64) tmp85 = tmp0 >= tmp84 tmp86 = tmp0 < tmp78 tmp87 = tmp85 & tmp86 tmp88 = tl.load(in_ptr14 + (r2 + (128*x1)), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full([1, 1], 14, tl.int64) tmp90 = tmp0 >= tmp89 tmp91 = tmp0 < tmp84 tmp92 = tmp90 & tmp91 tmp93 = tl.load(in_ptr15 + (r2 + (128*x1)), tmp92 & xmask, eviction_policy='evict_last', other=0.0) tmp94 = tmp0 >= tmp56 tmp95 = tmp0 < tmp89 tmp96 = tmp94 & tmp95 tmp97 = tl.load(in_ptr16 + (r2 + (128*x1)), tmp96 & xmask, eviction_policy='evict_last', other=0.0) tmp98 = tl.where(tmp96, tmp97, tmp77) tmp99 = tl.where(tmp92, tmp93, tmp98) tmp100 = tl.where(tmp87, tmp88, tmp99) tmp101 = tl.where(tmp82, tmp83, tmp100) tmp102 = tl.full([1, 1], 20, tl.int64) tmp103 = tmp0 >= tmp102 tmp104 = tl.full([1, 1], 21, tl.int64) tmp105 = tmp0 < tmp104 tmp106 = tmp103 & tmp105 tmp107 = tl.load(in_ptr17 + (r2 + (128*x1)), tmp106 & xmask, eviction_policy='evict_last', other=0.0) tmp108 = tl.full([1, 1], 19, tl.int64) tmp109 = tmp0 >= tmp108 tmp110 = tmp0 < tmp102 tmp111 = tmp109 & tmp110 tmp112 = tl.load(in_ptr18 + (r2 + (128*x1)), tmp111 & xmask, eviction_policy='evict_last', other=0.0) tmp113 = tl.full([1, 1], 18, tl.int64) tmp114 = tmp0 >= tmp113 tmp115 = tmp0 < tmp108 tmp116 = tmp114 & tmp115 tmp117 = tl.load(in_ptr19 + (r2 + (128*x1)), tmp116 & xmask, eviction_policy='evict_last', other=0.0) tmp118 = tmp0 >= tmp80 tmp119 = tmp0 < tmp113 tmp120 = tmp118 & tmp119 tmp121 = tl.load(in_ptr20 + (r2 + (128*x1)), tmp120 & xmask, eviction_policy='evict_last', other=0.0) tmp122 = tl.where(tmp120, tmp121, tmp101) tmp123 = tl.where(tmp116, tmp117, tmp122) tmp124 = tl.where(tmp111, tmp112, tmp123) tmp125 = tl.where(tmp106, tmp107, tmp124) tmp126 = tl.full([1, 1], 24, tl.int64) tmp127 = tmp0 >= tmp126 tmp128 = tl.full([1, 1], 25, tl.int64) tmp129 = tmp0 < tmp128 tmp130 = tmp127 & tmp129 tmp131 = tl.load(in_ptr21 + (r2 + (128*x1)), tmp130 & xmask, eviction_policy='evict_last', other=0.0) tmp132 = tl.full([1, 1], 
23, tl.int64) tmp133 = tmp0 >= tmp132 tmp134 = tmp0 < tmp126 tmp135 = tmp133 & tmp134 tmp136 = tl.load(in_ptr22 + (r2 + (128*x1)), tmp135 & xmask, eviction_policy='evict_last', other=0.0) tmp137 = tl.full([1, 1], 22, tl.int64) tmp138 = tmp0 >= tmp137 tmp139 = tmp0 < tmp132 tmp140 = tmp138 & tmp139 tmp141 = tl.load(in_ptr23 + (r2 + (128*x1)), tmp140 & xmask, eviction_policy='evict_last', other=0.0) tmp142 = tmp0 >= tmp104 tmp143 = tmp0 < tmp137 tmp144 = tmp142 & tmp143 tmp145 = tl.load(in_ptr24 + (r2 + (128*x1)), tmp144 & xmask, eviction_policy='evict_last', other=0.0) tmp146 = tl.where(tmp144, tmp145, tmp125) tmp147 = tl.where(tmp140, tmp141, tmp146) tmp148 = tl.where(tmp135, tmp136, tmp147) tmp149 = tl.where(tmp130, tmp131, tmp148) tmp150 = tl.full([1, 1], 28, tl.int64) tmp151 = tmp0 >= tmp150 tmp152 = tl.full([1, 1], 29, tl.int64) tmp153 = tmp0 < tmp152 tmp154 = tmp151 & tmp153 tmp155 = tl.load(in_ptr25 + (r2 + (128*x1)), tmp154 & xmask, eviction_policy='evict_last', other=0.0) tmp156 = tl.full([1, 1], 27, tl.int64) tmp157 = tmp0 >= tmp156 tmp158 = tmp0 < tmp150 tmp159 = tmp157 & tmp158 tmp160 = tl.load(in_ptr26 + (r2 + (128*x1)), tmp159 & xmask, eviction_policy='evict_last', other=0.0) tmp161 = tl.full([1, 1], 26, tl.int64) tmp162 = tmp0 >= tmp161 tmp163 = tmp0 < tmp156 tmp164 = tmp162 & tmp163 tmp165 = tl.load(in_ptr27 + (r2 + (128*x1)), tmp164 & xmask, eviction_policy='evict_last', other=0.0) tmp166 = tmp0 >= tmp128 tmp167 = tmp0 < tmp161 tmp168 = tmp166 & tmp167 tmp169 = tl.load(in_ptr28 + (r2 + (128*x1)), tmp168 & xmask, eviction_policy='evict_last', other=0.0) tmp170 = tl.where(tmp168, tmp169, tmp149) tmp171 = tl.where(tmp164, tmp165, tmp170) tmp172 = tl.where(tmp159, tmp160, tmp171) tmp173 = tl.where(tmp154, tmp155, tmp172) tmp174 = tl.full([1, 1], 32, tl.int64) tmp175 = tmp0 >= tmp174 tmp176 = tl.full([1, 1], 33, tl.int64) tmp177 = tmp0 < tmp176 tmp178 = tmp175 & tmp177 tmp179 = tl.load(in_ptr29 + (r2 + (128*x1)), tmp178 & xmask, eviction_policy='evict_last', other=0.0) tmp180 = tl.full([1, 1], 31, tl.int64) tmp181 = tmp0 >= tmp180 tmp182 = tmp0 < tmp174 tmp183 = tmp181 & tmp182 tmp184 = tl.load(in_ptr30 + (r2 + (128*x1)), tmp183 & xmask, eviction_policy='evict_last', other=0.0) tmp185 = tl.full([1, 1], 30, tl.int64) tmp186 = tmp0 >= tmp185 tmp187 = tmp0 < tmp180 tmp188 = tmp186 & tmp187 tmp189 = tl.load(in_ptr31 + (r2 + (128*x1)), tmp188 & xmask, eviction_policy='evict_last', other=0.0) tmp190 = tmp0 >= tmp152 tmp191 = tmp0 < tmp185 tmp192 = tmp190 & tmp191 tmp193 = tl.load(in_ptr32 + (r2 + (128*x1)), tmp192 & xmask, eviction_policy='evict_last', other=0.0) tmp194 = tl.where(tmp192, tmp193, tmp173) tmp195 = tl.where(tmp188, tmp189, tmp194) tmp196 = tl.where(tmp183, tmp184, tmp195) tmp197 = tl.where(tmp178, tmp179, tmp196) tmp198 = tl.full([1, 1], 36, tl.int64) tmp199 = tmp0 >= tmp198 tmp200 = tl.full([1, 1], 37, tl.int64) tmp201 = tmp0 < tmp200 tmp202 = tmp199 & tmp201 tmp203 = tl.load(in_ptr33 + (r2 + (128*x1)), tmp202 & xmask, eviction_policy='evict_last', other=0.0) tmp204 = tl.full([1, 1], 35, tl.int64) tmp205 = tmp0 >= tmp204 tmp206 = tmp0 < tmp198 tmp207 = tmp205 & tmp206 tmp208 = tl.load(in_ptr34 + (r2 + (128*x1)), tmp207 & xmask, eviction_policy='evict_last', other=0.0) tmp209 = tl.full([1, 1], 34, tl.int64) tmp210 = tmp0 >= tmp209 tmp211 = tmp0 < tmp204 tmp212 = tmp210 & tmp211 tmp213 = tl.load(in_ptr35 + (r2 + (128*x1)), tmp212 & xmask, eviction_policy='evict_last', other=0.0) tmp214 = tmp0 >= tmp176 tmp215 = tmp0 < tmp209 tmp216 = tmp214 & tmp215 tmp217 = 
tl.load(in_ptr36 + (r2 + (128*x1)), tmp216 & xmask, eviction_policy='evict_last', other=0.0) tmp218 = tl.where(tmp216, tmp217, tmp197) tmp219 = tl.where(tmp212, tmp213, tmp218) tmp220 = tl.where(tmp207, tmp208, tmp219) tmp221 = tl.where(tmp202, tmp203, tmp220) tmp222 = tl.full([1, 1], 40, tl.int64) tmp223 = tmp0 >= tmp222 tmp224 = tl.full([1, 1], 41, tl.int64) tmp225 = tmp0 < tmp224 tmp226 = tmp223 & tmp225 tmp227 = tl.load(in_ptr37 + (r2 + (128*x1)), tmp226 & xmask, eviction_policy='evict_last', other=0.0) tmp228 = tl.full([1, 1], 39, tl.int64) tmp229 = tmp0 >= tmp228 tmp230 = tmp0 < tmp222 tmp231 = tmp229 & tmp230 tmp232 = tl.load(in_ptr38 + (r2 + (128*x1)), tmp231 & xmask, eviction_policy='evict_last', other=0.0) tmp233 = tl.full([1, 1], 38, tl.int64) tmp234 = tmp0 >= tmp233 tmp235 = tmp0 < tmp228 tmp236 = tmp234 & tmp235 tmp237 = tl.load(in_ptr39 + (r2 + (128*x1)), tmp236 & xmask, eviction_policy='evict_last', other=0.0) tmp238 = tmp0 >= tmp200 tmp239 = tmp0 < tmp233 tmp240 = tmp238 & tmp239 tmp241 = tl.load(in_ptr40 + (r2 + (128*x1)), tmp240 & xmask, eviction_policy='evict_last', other=0.0) tmp242 = tl.where(tmp240, tmp241, tmp221) tmp243 = tl.where(tmp236, tmp237, tmp242) tmp244 = tl.where(tmp231, tmp232, tmp243) tmp245 = tl.where(tmp226, tmp227, tmp244) tmp246 = tl.full([1, 1], 44, tl.int64) tmp247 = tmp0 >= tmp246 tmp248 = tl.full([1, 1], 45, tl.int64) tmp249 = tmp0 < tmp248 tmp250 = tmp247 & tmp249 tmp251 = tl.load(in_ptr41 + (r2 + (128*x1)), tmp250 & xmask, eviction_policy='evict_last', other=0.0) tmp252 = tl.full([1, 1], 43, tl.int64) tmp253 = tmp0 >= tmp252 tmp254 = tmp0 < tmp246 tmp255 = tmp253 & tmp254 tmp256 = tl.load(in_ptr42 + (r2 + (128*x1)), tmp255 & xmask, eviction_policy='evict_last', other=0.0) tmp257 = tl.full([1, 1], 42, tl.int64) tmp258 = tmp0 >= tmp257 tmp259 = tmp0 < tmp252 tmp260 = tmp258 & tmp259 tmp261 = tl.load(in_ptr43 + (r2 + (128*x1)), tmp260 & xmask, eviction_policy='evict_last', other=0.0) tmp262 = tmp0 >= tmp224 tmp263 = tmp0 < tmp257 tmp264 = tmp262 & tmp263 tmp265 = tl.load(in_ptr44 + (r2 + (128*x1)), tmp264 & xmask, eviction_policy='evict_last', other=0.0) tmp266 = tl.where(tmp264, tmp265, tmp245) tmp267 = tl.where(tmp260, tmp261, tmp266) tmp268 = tl.where(tmp255, tmp256, tmp267) tmp269 = tl.where(tmp250, tmp251, tmp268) tmp270 = tl.full([1, 1], 48, tl.int64) tmp271 = tmp0 >= tmp270 tmp272 = tl.full([1, 1], 49, tl.int64) tmp273 = tmp0 < tmp272 tmp274 = tmp271 & tmp273 tmp275 = tl.load(in_ptr45 + (r2 + (128*x1)), tmp274 & xmask, eviction_policy='evict_last', other=0.0) tmp276 = tl.full([1, 1], 47, tl.int64) tmp277 = tmp0 >= tmp276 tmp278 = tmp0 < tmp270 tmp279 = tmp277 & tmp278 tmp280 = tl.load(in_ptr46 + (r2 + (128*x1)), tmp279 & xmask, eviction_policy='evict_last', other=0.0) tmp281 = tl.full([1, 1], 46, tl.int64) tmp282 = tmp0 >= tmp281 tmp283 = tmp0 < tmp276 tmp284 = tmp282 & tmp283 tmp285 = tl.load(in_ptr47 + (r2 + (128*x1)), tmp284 & xmask, eviction_policy='evict_last', other=0.0) tmp286 = tmp0 >= tmp248 tmp287 = tmp0 < tmp281 tmp288 = tmp286 & tmp287 tmp289 = tl.load(in_ptr48 + (r2 + (128*x1)), tmp288 & xmask, eviction_policy='evict_last', other=0.0) tmp290 = tl.where(tmp288, tmp289, tmp269) tmp291 = tl.where(tmp284, tmp285, tmp290) tmp292 = tl.where(tmp279, tmp280, tmp291) tmp293 = tl.where(tmp274, tmp275, tmp292) tmp294 = tl.full([1, 1], 52, tl.int64) tmp295 = tmp0 >= tmp294 tmp296 = tl.full([1, 1], 53, tl.int64) tmp297 = tmp0 < tmp296 tmp298 = tmp295 & tmp297 tmp299 = tl.load(in_ptr49 + (r2 + (128*x1)), tmp298 & xmask, 
eviction_policy='evict_last', other=0.0) tmp300 = tl.full([1, 1], 51, tl.int64) tmp301 = tmp0 >= tmp300 tmp302 = tmp0 < tmp294 tmp303 = tmp301 & tmp302 tmp304 = tl.load(in_ptr50 + (r2 + (128*x1)), tmp303 & xmask, eviction_policy='evict_last', other=0.0) tmp305 = tl.full([1, 1], 50, tl.int64) tmp306 = tmp0 >= tmp305 tmp307 = tmp0 < tmp300 tmp308 = tmp306 & tmp307 tmp309 = tl.load(in_ptr51 + (r2 + (128*x1)), tmp308 & xmask, eviction_policy='evict_last', other=0.0) tmp310 = tmp0 >= tmp272 tmp311 = tmp0 < tmp305 tmp312 = tmp310 & tmp311 tmp313 = tl.load(in_ptr52 + (r2 + (128*x1)), tmp312 & xmask, eviction_policy='evict_last', other=0.0) tmp314 = tl.where(tmp312, tmp313, tmp293) tmp315 = tl.where(tmp308, tmp309, tmp314) tmp316 = tl.where(tmp303, tmp304, tmp315) tmp317 = tl.where(tmp298, tmp299, tmp316) tmp318 = tl.full([1, 1], 56, tl.int64) tmp319 = tmp0 >= tmp318 tmp320 = tl.full([1, 1], 57, tl.int64) tmp321 = tmp0 < tmp320 tmp322 = tmp319 & tmp321 tmp323 = tl.load(in_ptr53 + (r2 + (128*x1)), tmp322 & xmask, eviction_policy='evict_last', other=0.0) tmp324 = tl.full([1, 1], 55, tl.int64) tmp325 = tmp0 >= tmp324 tmp326 = tmp0 < tmp318 tmp327 = tmp325 & tmp326 tmp328 = tl.load(in_ptr54 + (r2 + (128*x1)), tmp327 & xmask, eviction_policy='evict_last', other=0.0) tmp329 = tl.full([1, 1], 54, tl.int64) tmp330 = tmp0 >= tmp329 tmp331 = tmp0 < tmp324 tmp332 = tmp330 & tmp331 tmp333 = tl.load(in_ptr55 + (r2 + (128*x1)), tmp332 & xmask, eviction_policy='evict_last', other=0.0) tmp334 = tmp0 >= tmp296 tmp335 = tmp0 < tmp329 tmp336 = tmp334 & tmp335 tmp337 = tl.load(in_ptr56 + (r2 + (128*x1)), tmp336 & xmask, eviction_policy='evict_last', other=0.0) tmp338 = tl.where(tmp336, tmp337, tmp317) tmp339 = tl.where(tmp332, tmp333, tmp338) tmp340 = tl.where(tmp327, tmp328, tmp339) tmp341 = tl.where(tmp322, tmp323, tmp340) tmp342 = tl.full([1, 1], 60, tl.int64) tmp343 = tmp0 >= tmp342 tmp344 = tl.full([1, 1], 61, tl.int64) tmp345 = tmp0 < tmp344 tmp346 = tmp343 & tmp345 tmp347 = tl.load(in_ptr57 + (r2 + (128*x1)), tmp346 & xmask, eviction_policy='evict_last', other=0.0) tmp348 = tl.full([1, 1], 59, tl.int64) tmp349 = tmp0 >= tmp348 tmp350 = tmp0 < tmp342 tmp351 = tmp349 & tmp350 tmp352 = tl.load(in_ptr58 + (r2 + (128*x1)), tmp351 & xmask, eviction_policy='evict_last', other=0.0) tmp353 = tl.full([1, 1], 58, tl.int64) tmp354 = tmp0 >= tmp353 tmp355 = tmp0 < tmp348 tmp356 = tmp354 & tmp355 tmp357 = tl.load(in_ptr59 + (r2 + (128*x1)), tmp356 & xmask, eviction_policy='evict_last', other=0.0) tmp358 = tmp0 >= tmp320 tmp359 = tmp0 < tmp353 tmp360 = tmp358 & tmp359 tmp361 = tl.load(in_ptr60 + (r2 + (128*x1)), tmp360 & xmask, eviction_policy='evict_last', other=0.0) tmp362 = tl.where(tmp360, tmp361, tmp341) tmp363 = tl.where(tmp356, tmp357, tmp362) tmp364 = tl.where(tmp351, tmp352, tmp363) tmp365 = tl.where(tmp346, tmp347, tmp364) tmp366 = tl.full([1, 1], 63, tl.int64) tmp367 = tmp0 >= tmp366 tmp368 = tl.load(in_ptr61 + (r2 + (128*x1)), tmp367 & xmask, eviction_policy='evict_last', other=0.0) tmp369 = tl.full([1, 1], 62, tl.int64) tmp370 = tmp0 >= tmp369 tmp371 = tmp0 < tmp366 tmp372 = tmp370 & tmp371 tmp373 = tl.load(in_ptr62 + (r2 + (128*x1)), tmp372 & xmask, eviction_policy='evict_last', other=0.0) tmp374 = tmp0 >= tmp344 tmp375 = tmp0 < tmp369 tmp376 = tmp374 & tmp375 tmp377 = tl.load(in_ptr63 + (r2 + (128*x1)), tmp376 & xmask, eviction_policy='evict_last', other=0.0) tmp378 = tl.where(tmp376, tmp377, tmp365) tmp379 = tl.where(tmp372, tmp373, tmp378) tmp380 = tl.where(tmp367, tmp368, tmp379) tmp381 = tmp380 * tmp380 
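    # (Descriptive note: tmp380 now holds the gathered residual-sum entry for
    # this (batch, cluster, descriptor-dim) position; the reduction below sums
    # its squares over the 128-dim descriptor axis and takes the square root,
    # i.e. the per-(batch, cluster) L2 norm behind F.normalize(vlad, p=2, dim=2)
    # in the eager module.)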
tmp382 = tl.broadcast_to(tmp381, [XBLOCK, RBLOCK]) tmp384 = tl.where(xmask, tmp382, 0) tmp385 = tl.sum(tmp384, 1)[:, None] tmp386 = libdevice.sqrt(tmp385) tl.store(in_out_ptr0 + (r2 + (128*x3)), tmp380, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + (x3), tmp386, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/t2/ct2f2hshheqz3ic3c445uhobgzuy7jfkonekptjcv4yqglojftmh.py # Topologically Sorted Source Nodes: [vlad_3], Original ATen: [aten.linalg_vector_norm, aten.div] # Source node to ATen node mapping: # vlad_3 => div_3, pow_5, pow_6, sum_68 # Graph fragment: # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_2, 2), kwargs = {}) # %sum_68 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [1], True), kwargs = {}) # %pow_6 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_68, 0.5), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, %expand_66), kwargs = {}) triton_red_fused_div_linalg_vector_norm_7 = async_compile.triton('triton_red_fused_div_linalg_vector_norm_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[4, 8192], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_div_linalg_vector_norm_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_div_linalg_vector_norm_7(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 4 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp7 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (8192*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + ((64*x0) + (r1 // 128)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = 1e-12 tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 / tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = _tmp7 + tmp6 _tmp7 = tl.where(rmask & xmask, tmp8, _tmp7) 
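    # (Descriptive note: first pass done; _tmp7 has accumulated the squared
    # entries of the intra-normalized VLAD, each entry divided by
    # max(cluster_norm, 1e-12). The sqrt below yields the global L2 norm that
    # the second pass divides by, matching F.normalize(vlad.view(N, -1), p=2,
    # dim=1) in the eager module.)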
tmp7 = tl.sum(_tmp7, 1)[:, None] tmp9 = libdevice.sqrt(tmp7) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (8192*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tl.load(in_ptr1 + ((64*x0) + (r1 // 128)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp12 = 1e-12 tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp14 = tmp10 / tmp13 tmp15 = triton_helpers.maximum(tmp9, tmp12) tmp16 = tmp14 / tmp15 tl.store(out_ptr0 + (r1 + (8192*x0)), tmp16, rmask & xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 128, 64, 64), (524288, 4096, 64, 1)) assert_size_stride(primals_2, (64, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_3, (64, 128), (128, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.linalg_vector_norm] stream0 = get_raw_stream(0) triton_red_fused_linalg_vector_norm_0.run(primals_1, buf0, 16384, 128, grid=grid(16384), stream=stream0) buf1 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) buf6 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf8 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf10 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf12 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf15 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf17 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf19 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf21 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf24 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf26 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf28 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf30 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf33 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf35 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf37 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf39 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf42 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf44 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf46 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf48 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf51 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf53 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf55 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf57 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf60 = 
empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf62 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf64 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf66 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf69 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf71 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf73 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf75 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf78 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf80 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf82 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf84 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf87 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf89 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf91 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf93 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf96 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf98 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf100 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf102 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf105 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf107 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf109 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf111 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf114 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf116 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf118 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf120 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf123 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf125 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf127 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf129 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf132 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf134 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf136 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf138 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf141 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf143 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf145 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) # Topologically Sorted Source Nodes: [x, residual_2, residual_4, 
residual_6, residual_8, residual_10, residual_12, residual_14, residual_16, residual_18, residual_20, residual_22, residual_24, residual_26, residual_28, residual_30, residual_32, residual_34, residual_36, residual_38, residual_40, residual_42, residual_44, residual_46, residual_48, residual_50, residual_52, residual_54, residual_56, residual_58, residual_60, residual_62, residual_64, residual_66, residual_68, residual_70, residual_72, residual_74, residual_76, residual_78, residual_80, residual_82, residual_84, residual_86, residual_88, residual_90, residual_92, residual_94, residual_96, residual_98, residual_100, residual_102, residual_104, residual_106, residual_108, residual_110, residual_112, residual_114, residual_116, residual_118, residual_120, residual_122, residual_124, residual_126], Original ATen: [aten.div, aten.sub] triton_poi_fused_div_sub_1.run(primals_1, buf0, primals_3, buf1, buf6, buf8, buf10, buf12, buf15, buf17, buf19, buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39, buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60, buf62, buf64, buf66, buf69, buf71, buf73, buf75, buf78, buf80, buf82, buf84, buf87, buf89, buf91, buf93, buf96, buf98, buf100, buf102, buf105, buf107, buf109, buf111, buf114, buf116, buf118, buf120, buf123, buf125, buf127, buf129, buf132, buf134, buf136, buf138, buf141, buf143, buf145, 2097152, grid=grid(2097152), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = reinterpret_tensor(buf0, (4, 1, 4096), (4096, 4096, 1), 0); del buf0 # reuse buf4 = empty_strided_cuda((4, 1, 4096), (4096, 4096, 1), torch.float32) # Topologically Sorted Source Nodes: [soft_assign_1], Original ATen: [aten._softmax] triton_per_fused__softmax_2.run(buf2, buf3, buf4, 16384, 64, grid=grid(16384), stream=stream0) buf5 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf7 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf9 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf11 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf13 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf16 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf18 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf20 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf22 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf25 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf27 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf29 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf31 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf34 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf36 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf38 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf40 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf43 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf45 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf47 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf49 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf52 = 
empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf54 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf56 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf58 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf61 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf63 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf65 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf67 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) # Topologically Sorted Source Nodes: [residual, residual_1, sum_1, residual_3, sum_2, residual_5, sum_3, residual_7, sum_4, residual_9, sum_5, residual_11, sum_6, residual_13, sum_7, residual_15, sum_8, residual_17, sum_9, residual_19, sum_10, residual_21, sum_11, residual_23, sum_12, residual_25, sum_13, residual_27, sum_14, residual_29, sum_15, residual_31, sum_16, residual_33, sum_17, residual_35, sum_18, residual_37, sum_19, residual_39, sum_20, residual_41, sum_21, residual_43, sum_22, residual_45, sum_23, residual_47, sum_24, residual_49, sum_25, residual_51, sum_26, residual_53, sum_27, residual_55, sum_28, residual_57, sum_29], Original ATen: [aten.sub, aten.mul, aten.sum] triton_red_fused_mul_sub_sum_3.run(buf1, primals_3, buf2, buf3, buf4, buf6, buf8, buf10, buf12, buf15, buf17, buf19, buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39, buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60, buf62, buf64, buf66, buf5, buf7, buf9, buf11, buf13, buf16, buf18, buf20, buf22, buf25, buf27, buf29, buf31, buf34, buf36, buf38, buf40, buf43, buf45, buf47, buf49, buf52, buf54, buf56, buf58, buf61, buf63, buf65, buf67, 512, 4096, grid=grid(512), stream=stream0) buf70 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf72 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf74 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf76 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf79 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf81 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf83 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf85 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf88 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf90 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf92 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf94 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf97 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf99 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf101 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf103 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf106 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf108 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf110 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf112 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf115 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf117 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf119 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf121 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf124 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf126 = empty_strided_cuda((4, 1, 128), 
(128, 512, 1), torch.float32) buf128 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf130 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) # Topologically Sorted Source Nodes: [residual_59, sum_30, residual_61, sum_31, residual_63, sum_32, residual_65, sum_33, residual_67, sum_34, residual_69, sum_35, residual_71, sum_36, residual_73, sum_37, residual_75, sum_38, residual_77, sum_39, residual_79, sum_40, residual_81, sum_41, residual_83, sum_42, residual_85, sum_43, residual_87, sum_44, residual_89, sum_45, residual_91, sum_46, residual_93, sum_47, residual_95, sum_48, residual_97, sum_49, residual_99, sum_50, residual_101, sum_51, residual_103, sum_52, residual_105, sum_53, residual_107, sum_54, residual_109, sum_55, residual_111, sum_56, residual_113, sum_57], Original ATen: [aten.mul, aten.sum] triton_red_fused_mul_sum_4.run(buf69, buf2, buf3, buf4, buf71, buf73, buf75, buf78, buf80, buf82, buf84, buf87, buf89, buf91, buf93, buf96, buf98, buf100, buf102, buf105, buf107, buf109, buf111, buf114, buf116, buf118, buf120, buf123, buf125, buf127, buf129, buf70, buf72, buf74, buf76, buf79, buf81, buf83, buf85, buf88, buf90, buf92, buf94, buf97, buf99, buf101, buf103, buf106, buf108, buf110, buf112, buf115, buf117, buf119, buf121, buf124, buf126, buf128, buf130, 512, 4096, grid=grid(512), stream=stream0) buf133 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf135 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf137 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf139 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf142 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf144 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf146 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) # Topologically Sorted Source Nodes: [residual_115, sum_58, residual_117, sum_59, residual_119, sum_60, residual_121, sum_61, residual_123, sum_62, residual_125, sum_63, residual_127, sum_64], Original ATen: [aten.mul, aten.sum] triton_red_fused_mul_sum_5.run(buf132, buf2, buf3, buf4, buf134, buf136, buf138, buf141, buf143, buf145, buf133, buf135, buf137, buf139, buf142, buf144, buf146, 512, 4096, grid=grid(512), stream=stream0) buf14 = empty_strided_cuda((4, 64, 128), (8192, 128, 1), torch.float32) buf23 = buf14; del buf14 # reuse buf32 = buf23; del buf23 # reuse buf41 = buf32; del buf32 # reuse buf50 = buf41; del buf41 # reuse buf59 = buf50; del buf50 # reuse buf68 = buf59; del buf59 # reuse buf77 = buf68; del buf68 # reuse buf86 = buf77; del buf77 # reuse buf95 = buf86; del buf86 # reuse buf104 = buf95; del buf95 # reuse buf113 = buf104; del buf104 # reuse buf122 = buf113; del buf113 # reuse buf131 = buf122; del buf122 # reuse buf140 = buf131; del buf131 # reuse buf147 = buf140; del buf140 # reuse buf148 = empty_strided_cuda((4, 64, 1), (64, 1, 256), torch.float32) buf149 = reinterpret_tensor(buf148, (4, 64, 1), (64, 1, 1), 0); del buf148 # reuse # Topologically Sorted Source Nodes: [vlad, setitem, setitem_1, setitem_2, setitem_3, setitem_4, setitem_5, setitem_6, setitem_7, setitem_8, setitem_9, setitem_10, setitem_11, setitem_12, setitem_13, setitem_14, setitem_15, setitem_16, setitem_17, setitem_18, setitem_19, setitem_20, setitem_21, setitem_22, setitem_23, setitem_24, setitem_25, setitem_26, setitem_27, setitem_28, setitem_29, setitem_30, setitem_31, setitem_32, setitem_33, setitem_34, setitem_35, setitem_36, setitem_37, setitem_38, setitem_39, setitem_40, setitem_41, 
setitem_42, setitem_43, setitem_44, setitem_45, setitem_46, setitem_47, setitem_48, setitem_49, setitem_50, setitem_51, setitem_52, setitem_53, setitem_54, setitem_55, setitem_56, setitem_57, setitem_58, setitem_59, setitem_60, setitem_61, setitem_62, setitem_63, vlad_1], Original ATen: [aten.zeros, aten.copy, aten.linalg_vector_norm] triton_per_fused_copy_linalg_vector_norm_zeros_6.run(buf147, buf149, buf13, buf11, buf9, buf7, buf5, buf22, buf20, buf18, buf16, buf31, buf29, buf27, buf25, buf40, buf38, buf36, buf34, buf49, buf47, buf45, buf43, buf58, buf56, buf54, buf52, buf67, buf65, buf63, buf61, buf76, buf74, buf72, buf70, buf85, buf83, buf81, buf79, buf94, buf92, buf90, buf88, buf103, buf101, buf99, buf97, buf112, buf110, buf108, buf106, buf121, buf119, buf117, buf115, buf130, buf128, buf126, buf124, buf139, buf137, buf135, buf133, buf146, buf144, buf142, 256, 128, grid=grid(256), stream=stream0) del buf101 del buf103 del buf106 del buf108 del buf11 del buf110 del buf112 del buf115 del buf117 del buf119 del buf121 del buf124 del buf126 del buf128 del buf13 del buf130 del buf133 del buf135 del buf137 del buf139 del buf142 del buf144 del buf146 del buf16 del buf18 del buf20 del buf22 del buf25 del buf27 del buf29 del buf31 del buf34 del buf36 del buf38 del buf40 del buf43 del buf45 del buf47 del buf49 del buf5 del buf52 del buf54 del buf56 del buf58 del buf61 del buf63 del buf65 del buf67 del buf7 del buf70 del buf72 del buf74 del buf76 del buf79 del buf81 del buf83 del buf85 del buf88 del buf9 del buf90 del buf92 del buf94 del buf97 del buf99 buf150 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf151 = reinterpret_tensor(buf150, (4, 1), (1, 1), 0); del buf150 # reuse buf152 = empty_strided_cuda((4, 8192), (8192, 1), torch.float32) # Topologically Sorted Source Nodes: [vlad_3], Original ATen: [aten.linalg_vector_norm, aten.div] triton_red_fused_div_linalg_vector_norm_7.run(buf151, buf147, buf149, buf152, 4, 8192, grid=grid(4), stream=stream0) return (buf152, primals_2, buf1, buf2, buf3, buf4, reinterpret_tensor(primals_3, (1, 128), (128, 1), 0), buf6, buf8, buf10, buf12, buf15, buf17, buf19, buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39, buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60, buf62, buf64, buf66, buf69, buf71, buf73, buf75, buf78, buf80, buf82, buf84, buf87, buf89, buf91, buf93, buf96, buf98, buf100, buf102, buf105, buf107, buf109, buf111, buf114, buf116, buf118, buf120, buf123, buf125, buf127, buf129, buf132, buf134, buf136, buf138, buf141, buf143, buf145, buf147, buf149, buf151, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 128, 64, 64), (524288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((64, 128), (128, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
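# For orientation, a minimal usage sketch of the compiled entry point above.
# example_usage is a hypothetical helper, not part of the generated file, and
# it assumes a CUDA device is available; shapes follow
# benchmark_compiled_module(). Note that call() consumes its argument list.
def example_usage():
    p1 = torch.rand(4, 128, 64, 64, device='cuda')  # input feature map (primals_1)
    p2 = torch.rand(64, 128, 1, 1, device='cuda')   # 1x1 soft-assignment conv weight (primals_2)
    p3 = torch.rand(64, 128, device='cuda')         # cluster centroids (primals_3)
    # First output is the (4, 8192) L2-normalized VLAD descriptor batch.
    return call([p1, p2, p3])[0]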
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from sklearn.neighbors import NearestNeighbors


class NetVLAD(nn.Module):
    """NetVLAD layer implementation"""

    def __init__(self, num_clusters=64, dim=128, normalize_input=True,
                 vladv2=False, use_faiss=True):
        """
        Args:
            num_clusters : int
                The number of clusters
            dim : int
                Dimension of descriptors
            normalize_input : bool
                If true, descriptor-wise L2 normalization is applied to input.
            vladv2 : bool
                If true, use vladv2, otherwise use vladv1.
            use_faiss : bool
                If true, use faiss (instead of sklearn) for the
                nearest-neighbour search in init_params (vladv2 only).
        """
        super().__init__()
        self.num_clusters = num_clusters
        self.dim = dim
        self.alpha = 0
        self.vladv2 = vladv2
        self.normalize_input = normalize_input
        self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1),
                              bias=vladv2)
        self.centroids = nn.Parameter(torch.rand(num_clusters, dim))
        self.use_faiss = use_faiss

    def init_params(self, clsts, traindescs):
        if not self.vladv2:
            clstsAssign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True)
            dots = np.dot(clstsAssign, traindescs.T)
            dots.sort(0)
            dots = dots[::-1, :]
            self.alpha = (-np.log(0.01) /
                          np.mean(dots[0, :] - dots[1, :])).item()
            self.centroids = nn.Parameter(torch.from_numpy(clsts))
            self.conv.weight = nn.Parameter(
                torch.from_numpy(self.alpha * clstsAssign)
                .unsqueeze(2).unsqueeze(3))
            self.conv.bias = None
        else:
            if not self.use_faiss:
                knn = NearestNeighbors(n_jobs=-1)
                knn.fit(traindescs)
                del traindescs
                # kneighbors returns (distances, indices); square the
                # distances to the two nearest training descriptors.
                ds_sq = np.square(knn.kneighbors(clsts, 2)[0])
                del knn
            else:
                import faiss  # only required on this path
                index = faiss.IndexFlatL2(traindescs.shape[1])
                index.add(traindescs)
                del traindescs
                # IndexFlatL2.search returns (distances, indices) with the
                # distances already squared.
                ds_sq = index.search(clsts, 2)[0]
                del index
            self.alpha = (-np.log(0.01) /
                          np.mean(ds_sq[:, 1] - ds_sq[:, 0])).item()
            self.centroids = nn.Parameter(torch.from_numpy(clsts))
            del clsts, ds_sq
            self.conv.weight = nn.Parameter(
                (2.0 * self.alpha * self.centroids)
                .unsqueeze(-1).unsqueeze(-1))
            self.conv.bias = nn.Parameter(-self.alpha *
                                          self.centroids.norm(dim=1))

    def forward(self, x):
        N, C = x.shape[:2]
        if self.normalize_input:
            x = F.normalize(x, p=2, dim=1)
        soft_assign = self.conv(x).view(N, self.num_clusters, -1)
        soft_assign = F.softmax(soft_assign, dim=1)
        x_flatten = x.view(N, C, -1)
        vlad = torch.zeros([N, self.num_clusters, C], dtype=x.dtype,
                           layout=x.layout, device=x.device)
        # Aggregate soft-assignment-weighted residuals one cluster at a time.
        for j in range(self.num_clusters):
            residual = x_flatten.unsqueeze(0).permute(1, 0, 2, 3) \
                - self.centroids[j:j + 1, :].expand(x_flatten.size(-1),
                    -1, -1).permute(1, 2, 0).unsqueeze(0)
            residual *= soft_assign[:, j:j + 1, :].unsqueeze(2)
            vlad[:, j:j + 1, :] = residual.sum(dim=-1)
        vlad = F.normalize(vlad, p=2, dim=2)  # intra-normalization (per cluster)
        vlad = vlad.view(x.size(0), -1)       # flatten to (N, num_clusters * dim)
        vlad = F.normalize(vlad, p=2, dim=1)  # global L2 normalization
        return vlad


def get_inputs():
    return [torch.rand([4, 128, 64, 64])]


def get_init_inputs():
    return [[], {}]
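# A minimal, hypothetical usage sketch for the eager module above (assumes a
# CUDA device; shapes follow get_inputs()). The flattened output has
# num_clusters * dim = 64 * 128 = 8192 entries per image and is L2-normalized.
def example_module_usage():
    model = NetVLAD(num_clusters=64, dim=128).cuda()
    x = torch.rand(4, 128, 64, 64, device='cuda')
    v = model(x)                   # -> (4, 8192)
    print(v.shape, v.norm(dim=1))  # row norms are ~1.0
    return v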
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn from sklearn.neighbors import NearestNeighbors assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_red_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 4096 x1 = xindex // 4096 _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x3 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 524288 * x1), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = _tmp3 + tmp2 _tmp3 = tl.where(rmask, tmp4, _tmp3) tmp3 = tl.sum(_tmp3, 1)[:, None] tl.store(out_ptr0 + x3, tmp3, None) @triton.jit def triton_poi_fused_div_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, out_ptr28, out_ptr29, out_ptr30, out_ptr31, out_ptr32, out_ptr33, out_ptr34, out_ptr35, out_ptr36, out_ptr37, out_ptr38, out_ptr39, out_ptr40, out_ptr41, out_ptr42, out_ptr43, out_ptr44, out_ptr45, out_ptr46, out_ptr47, out_ptr48, out_ptr49, out_ptr50, out_ptr51, out_ptr52, out_ptr53, out_ptr54, out_ptr55, out_ptr56, out_ptr57, out_ptr58, out_ptr59, out_ptr60, out_ptr61, out_ptr62, out_ptr63, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = xindex // 524288 x1 = xindex // 4096 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x0 + 4096 * x2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr2 + (128 + x1), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (256 + x1), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (384 + x1), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (512 + x1), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + (640 + x1), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + (768 + x1), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr2 + (896 + x1), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (1024 + x1), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (1152 + x1), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (1280 + x1), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (1408 + x1), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + (1536 + x1), None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (1664 + x1), None, eviction_policy='evict_last') tmp32 = tl.load(in_ptr2 + 
(1792 + x1), None, eviction_policy='evict_last') tmp34 = tl.load(in_ptr2 + (1920 + x1), None, eviction_policy='evict_last') tmp36 = tl.load(in_ptr2 + (2048 + x1), None, eviction_policy='evict_last') tmp38 = tl.load(in_ptr2 + (2176 + x1), None, eviction_policy='evict_last') tmp40 = tl.load(in_ptr2 + (2304 + x1), None, eviction_policy='evict_last') tmp42 = tl.load(in_ptr2 + (2432 + x1), None, eviction_policy='evict_last') tmp44 = tl.load(in_ptr2 + (2560 + x1), None, eviction_policy='evict_last') tmp46 = tl.load(in_ptr2 + (2688 + x1), None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr2 + (2816 + x1), None, eviction_policy='evict_last') tmp50 = tl.load(in_ptr2 + (2944 + x1), None, eviction_policy='evict_last') tmp52 = tl.load(in_ptr2 + (3072 + x1), None, eviction_policy='evict_last') tmp54 = tl.load(in_ptr2 + (3200 + x1), None, eviction_policy='evict_last') tmp56 = tl.load(in_ptr2 + (3328 + x1), None, eviction_policy='evict_last') tmp58 = tl.load(in_ptr2 + (3456 + x1), None, eviction_policy='evict_last') tmp60 = tl.load(in_ptr2 + (3584 + x1), None, eviction_policy='evict_last') tmp62 = tl.load(in_ptr2 + (3712 + x1), None, eviction_policy='evict_last') tmp64 = tl.load(in_ptr2 + (3840 + x1), None, eviction_policy='evict_last') tmp66 = tl.load(in_ptr2 + (3968 + x1), None, eviction_policy='evict_last') tmp68 = tl.load(in_ptr2 + (4096 + x1), None, eviction_policy='evict_last') tmp70 = tl.load(in_ptr2 + (4224 + x1), None, eviction_policy='evict_last') tmp72 = tl.load(in_ptr2 + (4352 + x1), None, eviction_policy='evict_last') tmp74 = tl.load(in_ptr2 + (4480 + x1), None, eviction_policy='evict_last') tmp76 = tl.load(in_ptr2 + (4608 + x1), None, eviction_policy='evict_last') tmp78 = tl.load(in_ptr2 + (4736 + x1), None, eviction_policy='evict_last') tmp80 = tl.load(in_ptr2 + (4864 + x1), None, eviction_policy='evict_last') tmp82 = tl.load(in_ptr2 + (4992 + x1), None, eviction_policy='evict_last') tmp84 = tl.load(in_ptr2 + (5120 + x1), None, eviction_policy='evict_last') tmp86 = tl.load(in_ptr2 + (5248 + x1), None, eviction_policy='evict_last') tmp88 = tl.load(in_ptr2 + (5376 + x1), None, eviction_policy='evict_last') tmp90 = tl.load(in_ptr2 + (5504 + x1), None, eviction_policy='evict_last') tmp92 = tl.load(in_ptr2 + (5632 + x1), None, eviction_policy='evict_last') tmp94 = tl.load(in_ptr2 + (5760 + x1), None, eviction_policy='evict_last') tmp96 = tl.load(in_ptr2 + (5888 + x1), None, eviction_policy='evict_last') tmp98 = tl.load(in_ptr2 + (6016 + x1), None, eviction_policy='evict_last') tmp100 = tl.load(in_ptr2 + (6144 + x1), None, eviction_policy='evict_last') tmp102 = tl.load(in_ptr2 + (6272 + x1), None, eviction_policy='evict_last') tmp104 = tl.load(in_ptr2 + (6400 + x1), None, eviction_policy='evict_last') tmp106 = tl.load(in_ptr2 + (6528 + x1), None, eviction_policy='evict_last') tmp108 = tl.load(in_ptr2 + (6656 + x1), None, eviction_policy='evict_last') tmp110 = tl.load(in_ptr2 + (6784 + x1), None, eviction_policy='evict_last') tmp112 = tl.load(in_ptr2 + (6912 + x1), None, eviction_policy='evict_last') tmp114 = tl.load(in_ptr2 + (7040 + x1), None, eviction_policy='evict_last') tmp116 = tl.load(in_ptr2 + (7168 + x1), None, eviction_policy='evict_last') tmp118 = tl.load(in_ptr2 + (7296 + x1), None, eviction_policy='evict_last') tmp120 = tl.load(in_ptr2 + (7424 + x1), None, eviction_policy='evict_last') tmp122 = tl.load(in_ptr2 + (7552 + x1), None, eviction_policy='evict_last') tmp124 = tl.load(in_ptr2 + (7680 + x1), None, eviction_policy='evict_last') tmp126 = tl.load(in_ptr2 + (7808 + x1), 
None, eviction_policy='evict_last')
    tmp128 = tl.load(in_ptr2 + (7936 + x1), None, eviction_policy='evict_last')
    tmp130 = tl.load(in_ptr2 + (8064 + x1), None, eviction_policy='evict_last')
    tmp2 = libdevice.sqrt(tmp1)
    tmp3 = 1e-12
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = tmp0 / tmp4
    tmp7 = tmp5 - tmp6
    tmp9 = tmp5 - tmp8
    tmp11 = tmp5 - tmp10
    tmp13 = tmp5 - tmp12
    tmp15 = tmp5 - tmp14
    tmp17 = tmp5 - tmp16
    tmp19 = tmp5 - tmp18
    tmp21 = tmp5 - tmp20
    tmp23 = tmp5 - tmp22
    tmp25 = tmp5 - tmp24
    tmp27 = tmp5 - tmp26
    tmp29 = tmp5 - tmp28
    tmp31 = tmp5 - tmp30
    tmp33 = tmp5 - tmp32
    tmp35 = tmp5 - tmp34
    tmp37 = tmp5 - tmp36
    tmp39 = tmp5 - tmp38
    tmp41 = tmp5 - tmp40
    tmp43 = tmp5 - tmp42
    tmp45 = tmp5 - tmp44
    tmp47 = tmp5 - tmp46
    tmp49 = tmp5 - tmp48
    tmp51 = tmp5 - tmp50
    tmp53 = tmp5 - tmp52
    tmp55 = tmp5 - tmp54
    tmp57 = tmp5 - tmp56
    tmp59 = tmp5 - tmp58
    tmp61 = tmp5 - tmp60
    tmp63 = tmp5 - tmp62
    tmp65 = tmp5 - tmp64
    tmp67 = tmp5 - tmp66
    tmp69 = tmp5 - tmp68
    tmp71 = tmp5 - tmp70
    tmp73 = tmp5 - tmp72
    tmp75 = tmp5 - tmp74
    tmp77 = tmp5 - tmp76
    tmp79 = tmp5 - tmp78
    tmp81 = tmp5 - tmp80
    tmp83 = tmp5 - tmp82
    tmp85 = tmp5 - tmp84
    tmp87 = tmp5 - tmp86
    tmp89 = tmp5 - tmp88
    tmp91 = tmp5 - tmp90
    tmp93 = tmp5 - tmp92
    tmp95 = tmp5 - tmp94
    tmp97 = tmp5 - tmp96
    tmp99 = tmp5 - tmp98
    tmp101 = tmp5 - tmp100
    tmp103 = tmp5 - tmp102
    tmp105 = tmp5 - tmp104
    tmp107 = tmp5 - tmp106
    tmp109 = tmp5 - tmp108
    tmp111 = tmp5 - tmp110
    tmp113 = tmp5 - tmp112
    tmp115 = tmp5 - tmp114
    tmp117 = tmp5 - tmp116
    tmp119 = tmp5 - tmp118
    tmp121 = tmp5 - tmp120
    tmp123 = tmp5 - tmp122
    tmp125 = tmp5 - tmp124
    tmp127 = tmp5 - tmp126
    tmp129 = tmp5 - tmp128
    tmp131 = tmp5 - tmp130
    tl.store(out_ptr0 + x3, tmp5, None)
    tl.store(out_ptr1 + x3, tmp7, None)
    tl.store(out_ptr2 + x3, tmp9, None)
    tl.store(out_ptr3 + x3, tmp11, None)
    tl.store(out_ptr4 + x3, tmp13, None)
    tl.store(out_ptr5 + x3, tmp15, None)
    tl.store(out_ptr6 + x3, tmp17, None)
    tl.store(out_ptr7 + x3, tmp19, None)
    tl.store(out_ptr8 + x3, tmp21, None)
    tl.store(out_ptr9 + x3, tmp23, None)
    tl.store(out_ptr10 + x3, tmp25, None)
    tl.store(out_ptr11 + x3, tmp27, None)
    tl.store(out_ptr12 + x3, tmp29, None)
    tl.store(out_ptr13 + x3, tmp31, None)
    tl.store(out_ptr14 + x3, tmp33, None)
    tl.store(out_ptr15 + x3, tmp35, None)
    tl.store(out_ptr16 + x3, tmp37, None)
    tl.store(out_ptr17 + x3, tmp39, None)
    tl.store(out_ptr18 + x3, tmp41, None)
    tl.store(out_ptr19 + x3, tmp43, None)
    tl.store(out_ptr20 + x3, tmp45, None)
    tl.store(out_ptr21 + x3, tmp47, None)
    tl.store(out_ptr22 + x3, tmp49, None)
    tl.store(out_ptr23 + x3, tmp51, None)
    tl.store(out_ptr24 + x3, tmp53, None)
    tl.store(out_ptr25 + x3, tmp55, None)
    tl.store(out_ptr26 + x3, tmp57, None)
    tl.store(out_ptr27 + x3, tmp59, None)
    tl.store(out_ptr28 + x3, tmp61, None)
    tl.store(out_ptr29 + x3, tmp63, None)
    tl.store(out_ptr30 + x3, tmp65, None)
    tl.store(out_ptr31 + x3, tmp67, None)
    tl.store(out_ptr32 + x3, tmp69, None)
    tl.store(out_ptr33 + x3, tmp71, None)
    tl.store(out_ptr34 + x3, tmp73, None)
    tl.store(out_ptr35 + x3, tmp75, None)
    tl.store(out_ptr36 + x3, tmp77, None)
    tl.store(out_ptr37 + x3, tmp79, None)
    tl.store(out_ptr38 + x3, tmp81, None)
    tl.store(out_ptr39 + x3, tmp83, None)
    tl.store(out_ptr40 + x3, tmp85, None)
    tl.store(out_ptr41 + x3, tmp87, None)
    tl.store(out_ptr42 + x3, tmp89, None)
    tl.store(out_ptr43 + x3, tmp91, None)
    tl.store(out_ptr44 + x3, tmp93, None)
    tl.store(out_ptr45 + x3, tmp95, None)
    tl.store(out_ptr46 + x3, tmp97, None)
    tl.store(out_ptr47 + x3, tmp99, None)
    tl.store(out_ptr48 + x3, tmp101, None)
    tl.store(out_ptr49 + x3, tmp103, None)
    tl.store(out_ptr50 + x3, tmp105, None)
    tl.store(out_ptr51 + x3, tmp107, None)
    tl.store(out_ptr52 + x3, tmp109, None)
    tl.store(out_ptr53 + x3, tmp111, None)
    tl.store(out_ptr54 + x3, tmp113, None)
    tl.store(out_ptr55 + x3, tmp115, None)
    tl.store(out_ptr56 + x3, tmp117, None)
    tl.store(out_ptr57 + x3, tmp119, None)
    tl.store(out_ptr58 + x3, tmp121, None)
    tl.store(out_ptr59 + x3, tmp123, None)
    tl.store(out_ptr60 + x3, tmp125, None)
    tl.store(out_ptr61 + x3, tmp127, None)
    tl.store(out_ptr62 + x3, tmp129, None)
    tl.store(out_ptr63 + x3, tmp131, None)


# Numerically stable softmax statistics over the 64 assignment channels:
# for each (spatial position, batch) pair this kernel records the channel
# max and the sum of exponentials, which the reduction kernels below reuse
# to form normalized soft-assignment weights.
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr0, out_ptr1, xnumel,
    rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x0 = xindex % 4096
    x1 = xindex // 4096
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 262144 * x1), None)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
    tmp4 = tmp0 - tmp3
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
    tmp8 = tl.sum(tmp6, 1)[:, None]
    tl.store(out_ptr0 + x3, tmp3, None)
    tl.store(out_ptr1 + x3, tmp8, None)


# Softmax-weighted residual reductions for the first 29 of the 64
# clusters: each cluster's residual tensor is scaled by its normalized
# assignment weight and summed over the 4096 spatial positions.
@triton.jit
def triton_red_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10,
    in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17,
    in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24,
    in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31,
    in_ptr32, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5,
    out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11,
    out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17,
    out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23,
    out_ptr24, out_ptr25, out_ptr26, out_ptr27, out_ptr28, xnumel,
    rnumel, XBLOCK: tl.
constexpr, RBLOCK: tl.constexpr): xnumel = 512 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x0 = xindex % 128 tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') x1 = xindex // 128 _tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp20 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp29 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp38 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp47 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp56 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp65 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp74 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp83 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp92 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp101 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp110 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp119 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp128 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp137 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp146 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp155 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp164 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp173 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp182 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp191 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp200 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp209 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp218 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp227 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp236 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp245 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp254 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp263 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp3 = tl.load(in_ptr2 + (r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr3 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tl.load(in_ptr4 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl.load(in_ptr5 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp14 = tl.load(in_ptr2 + (4096 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tl.load(in_ptr6 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp23 = tl.load(in_ptr2 + (8192 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp31 = tl.load(in_ptr7 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp32 = tl.load(in_ptr2 + (12288 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp40 = tl.load(in_ptr8 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp41 = tl.load(in_ptr2 + (16384 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp49 = tl.load(in_ptr9 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp50 = tl.load(in_ptr2 + (20480 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp58 = tl.load(in_ptr10 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp59 = tl.load(in_ptr2 + (24576 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', 
other=0.0) tmp67 = tl.load(in_ptr11 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp68 = tl.load(in_ptr2 + (28672 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp76 = tl.load(in_ptr12 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp77 = tl.load(in_ptr2 + (32768 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp85 = tl.load(in_ptr13 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp86 = tl.load(in_ptr2 + (36864 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp94 = tl.load(in_ptr14 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp95 = tl.load(in_ptr2 + (40960 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp103 = tl.load(in_ptr15 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp104 = tl.load(in_ptr2 + (45056 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp112 = tl.load(in_ptr16 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp113 = tl.load(in_ptr2 + (49152 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp121 = tl.load(in_ptr17 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp122 = tl.load(in_ptr2 + (53248 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp130 = tl.load(in_ptr18 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp131 = tl.load(in_ptr2 + (57344 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp139 = tl.load(in_ptr19 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp140 = tl.load(in_ptr2 + (61440 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp148 = tl.load(in_ptr20 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp149 = tl.load(in_ptr2 + (65536 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp157 = tl.load(in_ptr21 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp158 = tl.load(in_ptr2 + (69632 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp166 = tl.load(in_ptr22 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp167 = tl.load(in_ptr2 + (73728 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp175 = tl.load(in_ptr23 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp176 = tl.load(in_ptr2 + (77824 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp184 = tl.load(in_ptr24 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp185 = tl.load(in_ptr2 + (81920 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp193 = tl.load(in_ptr25 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp194 = tl.load(in_ptr2 + (86016 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp202 = tl.load(in_ptr26 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp203 = tl.load(in_ptr2 + (90112 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp211 = tl.load(in_ptr27 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp212 = tl.load(in_ptr2 
+ (94208 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp220 = tl.load(in_ptr28 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp221 = tl.load(in_ptr2 + (98304 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp229 = tl.load(in_ptr29 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp230 = tl.load(in_ptr2 + (102400 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp238 = tl.load(in_ptr30 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp239 = tl.load(in_ptr2 + (106496 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp247 = tl.load(in_ptr31 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp248 = tl.load(in_ptr2 + (110592 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp256 = tl.load(in_ptr32 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp257 = tl.load(in_ptr2 + (114688 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp2 * tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = _tmp11 + tmp10 _tmp11 = tl.where(rmask & xmask, tmp12, _tmp11) tmp15 = tmp14 - tmp4 tmp16 = tl_math.exp(tmp15) tmp17 = tmp16 / tmp7 tmp18 = tmp13 * tmp17 tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp21 = _tmp20 + tmp19 _tmp20 = tl.where(rmask & xmask, tmp21, _tmp20) tmp24 = tmp23 - tmp4 tmp25 = tl_math.exp(tmp24) tmp26 = tmp25 / tmp7 tmp27 = tmp22 * tmp26 tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = _tmp29 + tmp28 _tmp29 = tl.where(rmask & xmask, tmp30, _tmp29) tmp33 = tmp32 - tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp34 / tmp7 tmp36 = tmp31 * tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = _tmp38 + tmp37 _tmp38 = tl.where(rmask & xmask, tmp39, _tmp38) tmp42 = tmp41 - tmp4 tmp43 = tl_math.exp(tmp42) tmp44 = tmp43 / tmp7 tmp45 = tmp40 * tmp44 tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK]) tmp48 = _tmp47 + tmp46 _tmp47 = tl.where(rmask & xmask, tmp48, _tmp47) tmp51 = tmp50 - tmp4 tmp52 = tl_math.exp(tmp51) tmp53 = tmp52 / tmp7 tmp54 = tmp49 * tmp53 tmp55 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK]) tmp57 = _tmp56 + tmp55 _tmp56 = tl.where(rmask & xmask, tmp57, _tmp56) tmp60 = tmp59 - tmp4 tmp61 = tl_math.exp(tmp60) tmp62 = tmp61 / tmp7 tmp63 = tmp58 * tmp62 tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK]) tmp66 = _tmp65 + tmp64 _tmp65 = tl.where(rmask & xmask, tmp66, _tmp65) tmp69 = tmp68 - tmp4 tmp70 = tl_math.exp(tmp69) tmp71 = tmp70 / tmp7 tmp72 = tmp67 * tmp71 tmp73 = tl.broadcast_to(tmp72, [XBLOCK, RBLOCK]) tmp75 = _tmp74 + tmp73 _tmp74 = tl.where(rmask & xmask, tmp75, _tmp74) tmp78 = tmp77 - tmp4 tmp79 = tl_math.exp(tmp78) tmp80 = tmp79 / tmp7 tmp81 = tmp76 * tmp80 tmp82 = tl.broadcast_to(tmp81, [XBLOCK, RBLOCK]) tmp84 = _tmp83 + tmp82 _tmp83 = tl.where(rmask & xmask, tmp84, _tmp83) tmp87 = tmp86 - tmp4 tmp88 = tl_math.exp(tmp87) tmp89 = tmp88 / tmp7 tmp90 = tmp85 * tmp89 tmp91 = tl.broadcast_to(tmp90, [XBLOCK, RBLOCK]) tmp93 = _tmp92 + tmp91 _tmp92 = tl.where(rmask & xmask, tmp93, _tmp92) tmp96 = tmp95 - tmp4 tmp97 = tl_math.exp(tmp96) tmp98 = tmp97 / tmp7 tmp99 = tmp94 * tmp98 tmp100 = tl.broadcast_to(tmp99, [XBLOCK, RBLOCK]) tmp102 = _tmp101 + tmp100 _tmp101 = tl.where(rmask & xmask, tmp102, _tmp101) tmp105 = tmp104 - tmp4 tmp106 = tl_math.exp(tmp105) 
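# The same four-step pattern repeats for every cluster handled by this
# kernel: subtract the row max, exponentiate, divide by the row sum to
# recover the softmax assignment weight, multiply the cluster residual by
# that weight, and fold the product into the cluster's running
# accumulator (_tmpNNN) under the combined r/x mask.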
tmp107 = tmp106 / tmp7 tmp108 = tmp103 * tmp107 tmp109 = tl.broadcast_to(tmp108, [XBLOCK, RBLOCK]) tmp111 = _tmp110 + tmp109 _tmp110 = tl.where(rmask & xmask, tmp111, _tmp110) tmp114 = tmp113 - tmp4 tmp115 = tl_math.exp(tmp114) tmp116 = tmp115 / tmp7 tmp117 = tmp112 * tmp116 tmp118 = tl.broadcast_to(tmp117, [XBLOCK, RBLOCK]) tmp120 = _tmp119 + tmp118 _tmp119 = tl.where(rmask & xmask, tmp120, _tmp119) tmp123 = tmp122 - tmp4 tmp124 = tl_math.exp(tmp123) tmp125 = tmp124 / tmp7 tmp126 = tmp121 * tmp125 tmp127 = tl.broadcast_to(tmp126, [XBLOCK, RBLOCK]) tmp129 = _tmp128 + tmp127 _tmp128 = tl.where(rmask & xmask, tmp129, _tmp128) tmp132 = tmp131 - tmp4 tmp133 = tl_math.exp(tmp132) tmp134 = tmp133 / tmp7 tmp135 = tmp130 * tmp134 tmp136 = tl.broadcast_to(tmp135, [XBLOCK, RBLOCK]) tmp138 = _tmp137 + tmp136 _tmp137 = tl.where(rmask & xmask, tmp138, _tmp137) tmp141 = tmp140 - tmp4 tmp142 = tl_math.exp(tmp141) tmp143 = tmp142 / tmp7 tmp144 = tmp139 * tmp143 tmp145 = tl.broadcast_to(tmp144, [XBLOCK, RBLOCK]) tmp147 = _tmp146 + tmp145 _tmp146 = tl.where(rmask & xmask, tmp147, _tmp146) tmp150 = tmp149 - tmp4 tmp151 = tl_math.exp(tmp150) tmp152 = tmp151 / tmp7 tmp153 = tmp148 * tmp152 tmp154 = tl.broadcast_to(tmp153, [XBLOCK, RBLOCK]) tmp156 = _tmp155 + tmp154 _tmp155 = tl.where(rmask & xmask, tmp156, _tmp155) tmp159 = tmp158 - tmp4 tmp160 = tl_math.exp(tmp159) tmp161 = tmp160 / tmp7 tmp162 = tmp157 * tmp161 tmp163 = tl.broadcast_to(tmp162, [XBLOCK, RBLOCK]) tmp165 = _tmp164 + tmp163 _tmp164 = tl.where(rmask & xmask, tmp165, _tmp164) tmp168 = tmp167 - tmp4 tmp169 = tl_math.exp(tmp168) tmp170 = tmp169 / tmp7 tmp171 = tmp166 * tmp170 tmp172 = tl.broadcast_to(tmp171, [XBLOCK, RBLOCK]) tmp174 = _tmp173 + tmp172 _tmp173 = tl.where(rmask & xmask, tmp174, _tmp173) tmp177 = tmp176 - tmp4 tmp178 = tl_math.exp(tmp177) tmp179 = tmp178 / tmp7 tmp180 = tmp175 * tmp179 tmp181 = tl.broadcast_to(tmp180, [XBLOCK, RBLOCK]) tmp183 = _tmp182 + tmp181 _tmp182 = tl.where(rmask & xmask, tmp183, _tmp182) tmp186 = tmp185 - tmp4 tmp187 = tl_math.exp(tmp186) tmp188 = tmp187 / tmp7 tmp189 = tmp184 * tmp188 tmp190 = tl.broadcast_to(tmp189, [XBLOCK, RBLOCK]) tmp192 = _tmp191 + tmp190 _tmp191 = tl.where(rmask & xmask, tmp192, _tmp191) tmp195 = tmp194 - tmp4 tmp196 = tl_math.exp(tmp195) tmp197 = tmp196 / tmp7 tmp198 = tmp193 * tmp197 tmp199 = tl.broadcast_to(tmp198, [XBLOCK, RBLOCK]) tmp201 = _tmp200 + tmp199 _tmp200 = tl.where(rmask & xmask, tmp201, _tmp200) tmp204 = tmp203 - tmp4 tmp205 = tl_math.exp(tmp204) tmp206 = tmp205 / tmp7 tmp207 = tmp202 * tmp206 tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK]) tmp210 = _tmp209 + tmp208 _tmp209 = tl.where(rmask & xmask, tmp210, _tmp209) tmp213 = tmp212 - tmp4 tmp214 = tl_math.exp(tmp213) tmp215 = tmp214 / tmp7 tmp216 = tmp211 * tmp215 tmp217 = tl.broadcast_to(tmp216, [XBLOCK, RBLOCK]) tmp219 = _tmp218 + tmp217 _tmp218 = tl.where(rmask & xmask, tmp219, _tmp218) tmp222 = tmp221 - tmp4 tmp223 = tl_math.exp(tmp222) tmp224 = tmp223 / tmp7 tmp225 = tmp220 * tmp224 tmp226 = tl.broadcast_to(tmp225, [XBLOCK, RBLOCK]) tmp228 = _tmp227 + tmp226 _tmp227 = tl.where(rmask & xmask, tmp228, _tmp227) tmp231 = tmp230 - tmp4 tmp232 = tl_math.exp(tmp231) tmp233 = tmp232 / tmp7 tmp234 = tmp229 * tmp233 tmp235 = tl.broadcast_to(tmp234, [XBLOCK, RBLOCK]) tmp237 = _tmp236 + tmp235 _tmp236 = tl.where(rmask & xmask, tmp237, _tmp236) tmp240 = tmp239 - tmp4 tmp241 = tl_math.exp(tmp240) tmp242 = tmp241 / tmp7 tmp243 = tmp238 * tmp242 tmp244 = tl.broadcast_to(tmp243, [XBLOCK, RBLOCK]) tmp246 = _tmp245 + tmp244 _tmp245 = 
tl.where(rmask & xmask, tmp246, _tmp245) tmp249 = tmp248 - tmp4 tmp250 = tl_math.exp(tmp249) tmp251 = tmp250 / tmp7 tmp252 = tmp247 * tmp251 tmp253 = tl.broadcast_to(tmp252, [XBLOCK, RBLOCK]) tmp255 = _tmp254 + tmp253 _tmp254 = tl.where(rmask & xmask, tmp255, _tmp254) tmp258 = tmp257 - tmp4 tmp259 = tl_math.exp(tmp258) tmp260 = tmp259 / tmp7 tmp261 = tmp256 * tmp260 tmp262 = tl.broadcast_to(tmp261, [XBLOCK, RBLOCK]) tmp264 = _tmp263 + tmp262 _tmp263 = tl.where(rmask & xmask, tmp264, _tmp263) tmp11 = tl.sum(_tmp11, 1)[:, None] tl.store(out_ptr0 + x3, tmp11, xmask) tmp20 = tl.sum(_tmp20, 1)[:, None] tl.store(out_ptr1 + x3, tmp20, xmask) tmp29 = tl.sum(_tmp29, 1)[:, None] tl.store(out_ptr2 + x3, tmp29, xmask) tmp38 = tl.sum(_tmp38, 1)[:, None] tl.store(out_ptr3 + x3, tmp38, xmask) tmp47 = tl.sum(_tmp47, 1)[:, None] tl.store(out_ptr4 + x3, tmp47, xmask) tmp56 = tl.sum(_tmp56, 1)[:, None] tl.store(out_ptr5 + x3, tmp56, xmask) tmp65 = tl.sum(_tmp65, 1)[:, None] tl.store(out_ptr6 + x3, tmp65, xmask) tmp74 = tl.sum(_tmp74, 1)[:, None] tl.store(out_ptr7 + x3, tmp74, xmask) tmp83 = tl.sum(_tmp83, 1)[:, None] tl.store(out_ptr8 + x3, tmp83, xmask) tmp92 = tl.sum(_tmp92, 1)[:, None] tl.store(out_ptr9 + x3, tmp92, xmask) tmp101 = tl.sum(_tmp101, 1)[:, None] tl.store(out_ptr10 + x3, tmp101, xmask) tmp110 = tl.sum(_tmp110, 1)[:, None] tl.store(out_ptr11 + x3, tmp110, xmask) tmp119 = tl.sum(_tmp119, 1)[:, None] tl.store(out_ptr12 + x3, tmp119, xmask) tmp128 = tl.sum(_tmp128, 1)[:, None] tl.store(out_ptr13 + x3, tmp128, xmask) tmp137 = tl.sum(_tmp137, 1)[:, None] tl.store(out_ptr14 + x3, tmp137, xmask) tmp146 = tl.sum(_tmp146, 1)[:, None] tl.store(out_ptr15 + x3, tmp146, xmask) tmp155 = tl.sum(_tmp155, 1)[:, None] tl.store(out_ptr16 + x3, tmp155, xmask) tmp164 = tl.sum(_tmp164, 1)[:, None] tl.store(out_ptr17 + x3, tmp164, xmask) tmp173 = tl.sum(_tmp173, 1)[:, None] tl.store(out_ptr18 + x3, tmp173, xmask) tmp182 = tl.sum(_tmp182, 1)[:, None] tl.store(out_ptr19 + x3, tmp182, xmask) tmp191 = tl.sum(_tmp191, 1)[:, None] tl.store(out_ptr20 + x3, tmp191, xmask) tmp200 = tl.sum(_tmp200, 1)[:, None] tl.store(out_ptr21 + x3, tmp200, xmask) tmp209 = tl.sum(_tmp209, 1)[:, None] tl.store(out_ptr22 + x3, tmp209, xmask) tmp218 = tl.sum(_tmp218, 1)[:, None] tl.store(out_ptr23 + x3, tmp218, xmask) tmp227 = tl.sum(_tmp227, 1)[:, None] tl.store(out_ptr24 + x3, tmp227, xmask) tmp236 = tl.sum(_tmp236, 1)[:, None] tl.store(out_ptr25 + x3, tmp236, xmask) tmp245 = tl.sum(_tmp245, 1)[:, None] tl.store(out_ptr26 + x3, tmp245, xmask) tmp254 = tl.sum(_tmp254, 1)[:, None] tl.store(out_ptr27 + x3, tmp254, xmask) tmp263 = tl.sum(_tmp263, 1)[:, None] tl.store(out_ptr28 + x3, tmp263, xmask) @triton.jit def triton_red_fused_mul_sum_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 512 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, 
RBLOCK)[None, :] x3 = xindex x1 = xindex // 128 _tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp72 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp81 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp90 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp99 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp108 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp117 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp126 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp135 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp144 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp153 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp162 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp171 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp180 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp189 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp198 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp207 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp216 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp225 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp234 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp243 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp252 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr1 + (118784 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr2 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tl.load(in_ptr3 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr4 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp12 = tl.load(in_ptr1 + (122880 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr5 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp21 = tl.load(in_ptr1 + (126976 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tl.load(in_ptr6 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp30 = tl.load(in_ptr1 + (131072 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp38 = tl.load(in_ptr7 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp39 = tl.load(in_ptr1 + (135168 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tl.load(in_ptr8 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp48 = tl.load(in_ptr1 + (139264 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp56 = tl.load(in_ptr9 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp57 = tl.load(in_ptr1 + (143360 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp65 = tl.load(in_ptr10 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp66 = tl.load(in_ptr1 + (147456 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp74 = tl.load(in_ptr11 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', 
other=0.0) tmp75 = tl.load(in_ptr1 + (151552 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp83 = tl.load(in_ptr12 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp84 = tl.load(in_ptr1 + (155648 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp92 = tl.load(in_ptr13 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp93 = tl.load(in_ptr1 + (159744 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp101 = tl.load(in_ptr14 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp102 = tl.load(in_ptr1 + (163840 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp110 = tl.load(in_ptr15 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp111 = tl.load(in_ptr1 + (167936 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp119 = tl.load(in_ptr16 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp120 = tl.load(in_ptr1 + (172032 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp128 = tl.load(in_ptr17 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp129 = tl.load(in_ptr1 + (176128 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp137 = tl.load(in_ptr18 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp138 = tl.load(in_ptr1 + (180224 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp146 = tl.load(in_ptr19 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp147 = tl.load(in_ptr1 + (184320 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp155 = tl.load(in_ptr20 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp156 = tl.load(in_ptr1 + (188416 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp164 = tl.load(in_ptr21 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp165 = tl.load(in_ptr1 + (192512 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp173 = tl.load(in_ptr22 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp174 = tl.load(in_ptr1 + (196608 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp182 = tl.load(in_ptr23 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp183 = tl.load(in_ptr1 + (200704 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp191 = tl.load(in_ptr24 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp192 = tl.load(in_ptr1 + (204800 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp200 = tl.load(in_ptr25 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp201 = tl.load(in_ptr1 + (208896 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp209 = tl.load(in_ptr26 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp210 = tl.load(in_ptr1 + (212992 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp218 = tl.load(in_ptr27 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp219 = tl.load(in_ptr1 + (217088 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', 
other=0.0) tmp227 = tl.load(in_ptr28 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp228 = tl.load(in_ptr1 + (221184 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp236 = tl.load(in_ptr29 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp237 = tl.load(in_ptr1 + (225280 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp245 = tl.load(in_ptr30 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp246 = tl.load(in_ptr1 + (229376 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp3 = tmp1 - tmp2 tmp4 = tl_math.exp(tmp3) tmp6 = tmp4 / tmp5 tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = _tmp9 + tmp8 _tmp9 = tl.where(rmask & xmask, tmp10, _tmp9) tmp13 = tmp12 - tmp2 tmp14 = tl_math.exp(tmp13) tmp15 = tmp14 / tmp5 tmp16 = tmp11 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = _tmp18 + tmp17 _tmp18 = tl.where(rmask & xmask, tmp19, _tmp18) tmp22 = tmp21 - tmp2 tmp23 = tl_math.exp(tmp22) tmp24 = tmp23 / tmp5 tmp25 = tmp20 * tmp24 tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = _tmp27 + tmp26 _tmp27 = tl.where(rmask & xmask, tmp28, _tmp27) tmp31 = tmp30 - tmp2 tmp32 = tl_math.exp(tmp31) tmp33 = tmp32 / tmp5 tmp34 = tmp29 * tmp33 tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = _tmp36 + tmp35 _tmp36 = tl.where(rmask & xmask, tmp37, _tmp36) tmp40 = tmp39 - tmp2 tmp41 = tl_math.exp(tmp40) tmp42 = tmp41 / tmp5 tmp43 = tmp38 * tmp42 tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK]) tmp46 = _tmp45 + tmp44 _tmp45 = tl.where(rmask & xmask, tmp46, _tmp45) tmp49 = tmp48 - tmp2 tmp50 = tl_math.exp(tmp49) tmp51 = tmp50 / tmp5 tmp52 = tmp47 * tmp51 tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK]) tmp55 = _tmp54 + tmp53 _tmp54 = tl.where(rmask & xmask, tmp55, _tmp54) tmp58 = tmp57 - tmp2 tmp59 = tl_math.exp(tmp58) tmp60 = tmp59 / tmp5 tmp61 = tmp56 * tmp60 tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK]) tmp64 = _tmp63 + tmp62 _tmp63 = tl.where(rmask & xmask, tmp64, _tmp63) tmp67 = tmp66 - tmp2 tmp68 = tl_math.exp(tmp67) tmp69 = tmp68 / tmp5 tmp70 = tmp65 * tmp69 tmp71 = tl.broadcast_to(tmp70, [XBLOCK, RBLOCK]) tmp73 = _tmp72 + tmp71 _tmp72 = tl.where(rmask & xmask, tmp73, _tmp72) tmp76 = tmp75 - tmp2 tmp77 = tl_math.exp(tmp76) tmp78 = tmp77 / tmp5 tmp79 = tmp74 * tmp78 tmp80 = tl.broadcast_to(tmp79, [XBLOCK, RBLOCK]) tmp82 = _tmp81 + tmp80 _tmp81 = tl.where(rmask & xmask, tmp82, _tmp81) tmp85 = tmp84 - tmp2 tmp86 = tl_math.exp(tmp85) tmp87 = tmp86 / tmp5 tmp88 = tmp83 * tmp87 tmp89 = tl.broadcast_to(tmp88, [XBLOCK, RBLOCK]) tmp91 = _tmp90 + tmp89 _tmp90 = tl.where(rmask & xmask, tmp91, _tmp90) tmp94 = tmp93 - tmp2 tmp95 = tl_math.exp(tmp94) tmp96 = tmp95 / tmp5 tmp97 = tmp92 * tmp96 tmp98 = tl.broadcast_to(tmp97, [XBLOCK, RBLOCK]) tmp100 = _tmp99 + tmp98 _tmp99 = tl.where(rmask & xmask, tmp100, _tmp99) tmp103 = tmp102 - tmp2 tmp104 = tl_math.exp(tmp103) tmp105 = tmp104 / tmp5 tmp106 = tmp101 * tmp105 tmp107 = tl.broadcast_to(tmp106, [XBLOCK, RBLOCK]) tmp109 = _tmp108 + tmp107 _tmp108 = tl.where(rmask & xmask, tmp109, _tmp108) tmp112 = tmp111 - tmp2 tmp113 = tl_math.exp(tmp112) tmp114 = tmp113 / tmp5 tmp115 = tmp110 * tmp114 tmp116 = tl.broadcast_to(tmp115, [XBLOCK, RBLOCK]) tmp118 = _tmp117 + tmp116 _tmp117 = tl.where(rmask & xmask, tmp118, _tmp117) tmp121 = tmp120 - tmp2 tmp122 = tl_math.exp(tmp121) tmp123 = tmp122 / tmp5 tmp124 = tmp119 * tmp123 tmp125 = tl.broadcast_to(tmp124, 
[XBLOCK, RBLOCK]) tmp127 = _tmp126 + tmp125 _tmp126 = tl.where(rmask & xmask, tmp127, _tmp126) tmp130 = tmp129 - tmp2 tmp131 = tl_math.exp(tmp130) tmp132 = tmp131 / tmp5 tmp133 = tmp128 * tmp132 tmp134 = tl.broadcast_to(tmp133, [XBLOCK, RBLOCK]) tmp136 = _tmp135 + tmp134 _tmp135 = tl.where(rmask & xmask, tmp136, _tmp135) tmp139 = tmp138 - tmp2 tmp140 = tl_math.exp(tmp139) tmp141 = tmp140 / tmp5 tmp142 = tmp137 * tmp141 tmp143 = tl.broadcast_to(tmp142, [XBLOCK, RBLOCK]) tmp145 = _tmp144 + tmp143 _tmp144 = tl.where(rmask & xmask, tmp145, _tmp144) tmp148 = tmp147 - tmp2 tmp149 = tl_math.exp(tmp148) tmp150 = tmp149 / tmp5 tmp151 = tmp146 * tmp150 tmp152 = tl.broadcast_to(tmp151, [XBLOCK, RBLOCK]) tmp154 = _tmp153 + tmp152 _tmp153 = tl.where(rmask & xmask, tmp154, _tmp153) tmp157 = tmp156 - tmp2 tmp158 = tl_math.exp(tmp157) tmp159 = tmp158 / tmp5 tmp160 = tmp155 * tmp159 tmp161 = tl.broadcast_to(tmp160, [XBLOCK, RBLOCK]) tmp163 = _tmp162 + tmp161 _tmp162 = tl.where(rmask & xmask, tmp163, _tmp162) tmp166 = tmp165 - tmp2 tmp167 = tl_math.exp(tmp166) tmp168 = tmp167 / tmp5 tmp169 = tmp164 * tmp168 tmp170 = tl.broadcast_to(tmp169, [XBLOCK, RBLOCK]) tmp172 = _tmp171 + tmp170 _tmp171 = tl.where(rmask & xmask, tmp172, _tmp171) tmp175 = tmp174 - tmp2 tmp176 = tl_math.exp(tmp175) tmp177 = tmp176 / tmp5 tmp178 = tmp173 * tmp177 tmp179 = tl.broadcast_to(tmp178, [XBLOCK, RBLOCK]) tmp181 = _tmp180 + tmp179 _tmp180 = tl.where(rmask & xmask, tmp181, _tmp180) tmp184 = tmp183 - tmp2 tmp185 = tl_math.exp(tmp184) tmp186 = tmp185 / tmp5 tmp187 = tmp182 * tmp186 tmp188 = tl.broadcast_to(tmp187, [XBLOCK, RBLOCK]) tmp190 = _tmp189 + tmp188 _tmp189 = tl.where(rmask & xmask, tmp190, _tmp189) tmp193 = tmp192 - tmp2 tmp194 = tl_math.exp(tmp193) tmp195 = tmp194 / tmp5 tmp196 = tmp191 * tmp195 tmp197 = tl.broadcast_to(tmp196, [XBLOCK, RBLOCK]) tmp199 = _tmp198 + tmp197 _tmp198 = tl.where(rmask & xmask, tmp199, _tmp198) tmp202 = tmp201 - tmp2 tmp203 = tl_math.exp(tmp202) tmp204 = tmp203 / tmp5 tmp205 = tmp200 * tmp204 tmp206 = tl.broadcast_to(tmp205, [XBLOCK, RBLOCK]) tmp208 = _tmp207 + tmp206 _tmp207 = tl.where(rmask & xmask, tmp208, _tmp207) tmp211 = tmp210 - tmp2 tmp212 = tl_math.exp(tmp211) tmp213 = tmp212 / tmp5 tmp214 = tmp209 * tmp213 tmp215 = tl.broadcast_to(tmp214, [XBLOCK, RBLOCK]) tmp217 = _tmp216 + tmp215 _tmp216 = tl.where(rmask & xmask, tmp217, _tmp216) tmp220 = tmp219 - tmp2 tmp221 = tl_math.exp(tmp220) tmp222 = tmp221 / tmp5 tmp223 = tmp218 * tmp222 tmp224 = tl.broadcast_to(tmp223, [XBLOCK, RBLOCK]) tmp226 = _tmp225 + tmp224 _tmp225 = tl.where(rmask & xmask, tmp226, _tmp225) tmp229 = tmp228 - tmp2 tmp230 = tl_math.exp(tmp229) tmp231 = tmp230 / tmp5 tmp232 = tmp227 * tmp231 tmp233 = tl.broadcast_to(tmp232, [XBLOCK, RBLOCK]) tmp235 = _tmp234 + tmp233 _tmp234 = tl.where(rmask & xmask, tmp235, _tmp234) tmp238 = tmp237 - tmp2 tmp239 = tl_math.exp(tmp238) tmp240 = tmp239 / tmp5 tmp241 = tmp236 * tmp240 tmp242 = tl.broadcast_to(tmp241, [XBLOCK, RBLOCK]) tmp244 = _tmp243 + tmp242 _tmp243 = tl.where(rmask & xmask, tmp244, _tmp243) tmp247 = tmp246 - tmp2 tmp248 = tl_math.exp(tmp247) tmp249 = tmp248 / tmp5 tmp250 = tmp245 * tmp249 tmp251 = tl.broadcast_to(tmp250, [XBLOCK, RBLOCK]) tmp253 = _tmp252 + tmp251 _tmp252 = tl.where(rmask & xmask, tmp253, _tmp252) tmp9 = tl.sum(_tmp9, 1)[:, None] tl.store(out_ptr0 + x3, tmp9, xmask) tmp18 = tl.sum(_tmp18, 1)[:, None] tl.store(out_ptr1 + x3, tmp18, xmask) tmp27 = tl.sum(_tmp27, 1)[:, None] tl.store(out_ptr2 + x3, tmp27, xmask) tmp36 = tl.sum(_tmp36, 1)[:, None] 
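# Epilogue (continues below): each running accumulator is collapsed over
# the 4096-element reduction dimension and stored to its own cluster
# output buffer, one scalar per (batch, channel) pair.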
tl.store(out_ptr3 + x3, tmp36, xmask) tmp45 = tl.sum(_tmp45, 1)[:, None] tl.store(out_ptr4 + x3, tmp45, xmask) tmp54 = tl.sum(_tmp54, 1)[:, None] tl.store(out_ptr5 + x3, tmp54, xmask) tmp63 = tl.sum(_tmp63, 1)[:, None] tl.store(out_ptr6 + x3, tmp63, xmask) tmp72 = tl.sum(_tmp72, 1)[:, None] tl.store(out_ptr7 + x3, tmp72, xmask) tmp81 = tl.sum(_tmp81, 1)[:, None] tl.store(out_ptr8 + x3, tmp81, xmask) tmp90 = tl.sum(_tmp90, 1)[:, None] tl.store(out_ptr9 + x3, tmp90, xmask) tmp99 = tl.sum(_tmp99, 1)[:, None] tl.store(out_ptr10 + x3, tmp99, xmask) tmp108 = tl.sum(_tmp108, 1)[:, None] tl.store(out_ptr11 + x3, tmp108, xmask) tmp117 = tl.sum(_tmp117, 1)[:, None] tl.store(out_ptr12 + x3, tmp117, xmask) tmp126 = tl.sum(_tmp126, 1)[:, None] tl.store(out_ptr13 + x3, tmp126, xmask) tmp135 = tl.sum(_tmp135, 1)[:, None] tl.store(out_ptr14 + x3, tmp135, xmask) tmp144 = tl.sum(_tmp144, 1)[:, None] tl.store(out_ptr15 + x3, tmp144, xmask) tmp153 = tl.sum(_tmp153, 1)[:, None] tl.store(out_ptr16 + x3, tmp153, xmask) tmp162 = tl.sum(_tmp162, 1)[:, None] tl.store(out_ptr17 + x3, tmp162, xmask) tmp171 = tl.sum(_tmp171, 1)[:, None] tl.store(out_ptr18 + x3, tmp171, xmask) tmp180 = tl.sum(_tmp180, 1)[:, None] tl.store(out_ptr19 + x3, tmp180, xmask) tmp189 = tl.sum(_tmp189, 1)[:, None] tl.store(out_ptr20 + x3, tmp189, xmask) tmp198 = tl.sum(_tmp198, 1)[:, None] tl.store(out_ptr21 + x3, tmp198, xmask) tmp207 = tl.sum(_tmp207, 1)[:, None] tl.store(out_ptr22 + x3, tmp207, xmask) tmp216 = tl.sum(_tmp216, 1)[:, None] tl.store(out_ptr23 + x3, tmp216, xmask) tmp225 = tl.sum(_tmp225, 1)[:, None] tl.store(out_ptr24 + x3, tmp225, xmask) tmp234 = tl.sum(_tmp234, 1)[:, None] tl.store(out_ptr25 + x3, tmp234, xmask) tmp243 = tl.sum(_tmp243, 1)[:, None] tl.store(out_ptr26 + x3, tmp243, xmask) tmp252 = tl.sum(_tmp252, 1)[:, None] tl.store(out_ptr27 + x3, tmp252, xmask) @triton.jit def triton_red_fused_mul_sum_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 512 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x1 = xindex // 128 _tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr1 + (233472 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr2 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tl.load(in_ptr3 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr4 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp12 = tl.load(in_ptr1 + (237568 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr5 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp21 = tl.load(in_ptr1 + (241664 + r2 + 262144 * x1), 
rmask & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tl.load(in_ptr6 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp30 = tl.load(in_ptr1 + (245760 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp38 = tl.load(in_ptr7 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp39 = tl.load(in_ptr1 + (249856 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tl.load(in_ptr8 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp48 = tl.load(in_ptr1 + (253952 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp56 = tl.load(in_ptr9 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp57 = tl.load(in_ptr1 + (258048 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp3 = tmp1 - tmp2 tmp4 = tl_math.exp(tmp3) tmp6 = tmp4 / tmp5 tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = _tmp9 + tmp8 _tmp9 = tl.where(rmask & xmask, tmp10, _tmp9) tmp13 = tmp12 - tmp2 tmp14 = tl_math.exp(tmp13) tmp15 = tmp14 / tmp5 tmp16 = tmp11 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = _tmp18 + tmp17 _tmp18 = tl.where(rmask & xmask, tmp19, _tmp18) tmp22 = tmp21 - tmp2 tmp23 = tl_math.exp(tmp22) tmp24 = tmp23 / tmp5 tmp25 = tmp20 * tmp24 tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = _tmp27 + tmp26 _tmp27 = tl.where(rmask & xmask, tmp28, _tmp27) tmp31 = tmp30 - tmp2 tmp32 = tl_math.exp(tmp31) tmp33 = tmp32 / tmp5 tmp34 = tmp29 * tmp33 tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = _tmp36 + tmp35 _tmp36 = tl.where(rmask & xmask, tmp37, _tmp36) tmp40 = tmp39 - tmp2 tmp41 = tl_math.exp(tmp40) tmp42 = tmp41 / tmp5 tmp43 = tmp38 * tmp42 tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK]) tmp46 = _tmp45 + tmp44 _tmp45 = tl.where(rmask & xmask, tmp46, _tmp45) tmp49 = tmp48 - tmp2 tmp50 = tl_math.exp(tmp49) tmp51 = tmp50 / tmp5 tmp52 = tmp47 * tmp51 tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK]) tmp55 = _tmp54 + tmp53 _tmp54 = tl.where(rmask & xmask, tmp55, _tmp54) tmp58 = tmp57 - tmp2 tmp59 = tl_math.exp(tmp58) tmp60 = tmp59 / tmp5 tmp61 = tmp56 * tmp60 tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK]) tmp64 = _tmp63 + tmp62 _tmp63 = tl.where(rmask & xmask, tmp64, _tmp63) tmp9 = tl.sum(_tmp9, 1)[:, None] tl.store(out_ptr0 + x3, tmp9, xmask) tmp18 = tl.sum(_tmp18, 1)[:, None] tl.store(out_ptr1 + x3, tmp18, xmask) tmp27 = tl.sum(_tmp27, 1)[:, None] tl.store(out_ptr2 + x3, tmp27, xmask) tmp36 = tl.sum(_tmp36, 1)[:, None] tl.store(out_ptr3 + x3, tmp36, xmask) tmp45 = tl.sum(_tmp45, 1)[:, None] tl.store(out_ptr4 + x3, tmp45, xmask) tmp54 = tl.sum(_tmp54, 1)[:, None] tl.store(out_ptr5 + x3, tmp54, xmask) tmp63 = tl.sum(_tmp63, 1)[:, None] tl.store(out_ptr6 + x3, tmp63, xmask) @triton.jit def triton_per_fused_copy_linalg_vector_norm_zeros_6(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, in_ptr32, in_ptr33, in_ptr34, in_ptr35, in_ptr36, in_ptr37, in_ptr38, in_ptr39, in_ptr40, in_ptr41, in_ptr42, in_ptr43, in_ptr44, in_ptr45, in_ptr46, in_ptr47, in_ptr48, in_ptr49, in_ptr50, in_ptr51, in_ptr52, in_ptr53, in_ptr54, in_ptr55, in_ptr56, in_ptr57, in_ptr58, in_ptr59, 
in_ptr60, in_ptr61, in_ptr62, in_ptr63, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 256 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex % 64 r2 = rindex x1 = xindex // 64 x3 = xindex tmp0 = x0 tmp1 = tl.full([1, 1], 4, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1, 1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (r2 + 128 * x1), tmp5 & xmask, eviction_policy ='evict_last', other=0.0) tmp7 = tl.full([1, 1], 3, tl.int64) tmp8 = tmp0 >= tmp7 tmp9 = tmp0 < tmp1 tmp10 = tmp8 & tmp9 tmp11 = tl.load(in_ptr1 + (r2 + 128 * x1), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.full([1, 1], 2, tl.int64) tmp13 = tmp0 >= tmp12 tmp14 = tmp0 < tmp7 tmp15 = tmp13 & tmp14 tmp16 = tl.load(in_ptr2 + (r2 + 128 * x1), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tl.full([1, 1], 1, tl.int64) tmp18 = tmp0 >= tmp17 tmp19 = tmp0 < tmp12 tmp20 = tmp18 & tmp19 tmp21 = tl.load(in_ptr3 + (r2 + 128 * x1), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tmp0 < tmp17 tmp23 = tl.load(in_ptr4 + (r2 + 128 * x1), tmp22 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = 0.0 tmp25 = tl.where(tmp22, tmp23, tmp24) tmp26 = tl.where(tmp20, tmp21, tmp25) tmp27 = tl.where(tmp15, tmp16, tmp26) tmp28 = tl.where(tmp10, tmp11, tmp27) tmp29 = tl.where(tmp5, tmp6, tmp28) tmp30 = tl.full([1, 1], 8, tl.int64) tmp31 = tmp0 >= tmp30 tmp32 = tl.full([1, 1], 9, tl.int64) tmp33 = tmp0 < tmp32 tmp34 = tmp31 & tmp33 tmp35 = tl.load(in_ptr5 + (r2 + 128 * x1), tmp34 & xmask, eviction_policy='evict_last', other=0.0) tmp36 = tl.full([1, 1], 7, tl.int64) tmp37 = tmp0 >= tmp36 tmp38 = tmp0 < tmp30 tmp39 = tmp37 & tmp38 tmp40 = tl.load(in_ptr6 + (r2 + 128 * x1), tmp39 & xmask, eviction_policy='evict_last', other=0.0) tmp41 = tl.full([1, 1], 6, tl.int64) tmp42 = tmp0 >= tmp41 tmp43 = tmp0 < tmp36 tmp44 = tmp42 & tmp43 tmp45 = tl.load(in_ptr7 + (r2 + 128 * x1), tmp44 & xmask, eviction_policy='evict_last', other=0.0) tmp46 = tmp0 >= tmp3 tmp47 = tmp0 < tmp41 tmp48 = tmp46 & tmp47 tmp49 = tl.load(in_ptr8 + (r2 + 128 * x1), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.where(tmp48, tmp49, tmp29) tmp51 = tl.where(tmp44, tmp45, tmp50) tmp52 = tl.where(tmp39, tmp40, tmp51) tmp53 = tl.where(tmp34, tmp35, tmp52) tmp54 = tl.full([1, 1], 12, tl.int64) tmp55 = tmp0 >= tmp54 tmp56 = tl.full([1, 1], 13, tl.int64) tmp57 = tmp0 < tmp56 tmp58 = tmp55 & tmp57 tmp59 = tl.load(in_ptr9 + (r2 + 128 * x1), tmp58 & xmask, eviction_policy='evict_last', other=0.0) tmp60 = tl.full([1, 1], 11, tl.int64) tmp61 = tmp0 >= tmp60 tmp62 = tmp0 < tmp54 tmp63 = tmp61 & tmp62 tmp64 = tl.load(in_ptr10 + (r2 + 128 * x1), tmp63 & xmask, eviction_policy='evict_last', other=0.0) tmp65 = tl.full([1, 1], 10, tl.int64) tmp66 = tmp0 >= tmp65 tmp67 = tmp0 < tmp60 tmp68 = tmp66 & tmp67 tmp69 = tl.load(in_ptr11 + (r2 + 128 * x1), tmp68 & xmask, eviction_policy='evict_last', other=0.0) tmp70 = tmp0 >= tmp32 tmp71 = tmp0 < tmp65 tmp72 = tmp70 & tmp71 tmp73 = tl.load(in_ptr12 + (r2 + 128 * x1), tmp72 & xmask, eviction_policy='evict_last', other=0.0) tmp74 = tl.where(tmp72, tmp73, tmp53) tmp75 = tl.where(tmp68, tmp69, tmp74) tmp76 = tl.where(tmp63, tmp64, tmp75) tmp77 = tl.where(tmp58, tmp59, tmp76) tmp78 = tl.full([1, 1], 16, tl.int64) tmp79 = tmp0 >= tmp78 tmp80 = tl.full([1, 1], 17, tl.int64) tmp81 = tmp0 < 
tmp80 tmp82 = tmp79 & tmp81 tmp83 = tl.load(in_ptr13 + (r2 + 128 * x1), tmp82 & xmask, eviction_policy='evict_last', other=0.0) tmp84 = tl.full([1, 1], 15, tl.int64) tmp85 = tmp0 >= tmp84 tmp86 = tmp0 < tmp78 tmp87 = tmp85 & tmp86 tmp88 = tl.load(in_ptr14 + (r2 + 128 * x1), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full([1, 1], 14, tl.int64) tmp90 = tmp0 >= tmp89 tmp91 = tmp0 < tmp84 tmp92 = tmp90 & tmp91 tmp93 = tl.load(in_ptr15 + (r2 + 128 * x1), tmp92 & xmask, eviction_policy='evict_last', other=0.0) tmp94 = tmp0 >= tmp56 tmp95 = tmp0 < tmp89 tmp96 = tmp94 & tmp95 tmp97 = tl.load(in_ptr16 + (r2 + 128 * x1), tmp96 & xmask, eviction_policy='evict_last', other=0.0) tmp98 = tl.where(tmp96, tmp97, tmp77) tmp99 = tl.where(tmp92, tmp93, tmp98) tmp100 = tl.where(tmp87, tmp88, tmp99) tmp101 = tl.where(tmp82, tmp83, tmp100) tmp102 = tl.full([1, 1], 20, tl.int64) tmp103 = tmp0 >= tmp102 tmp104 = tl.full([1, 1], 21, tl.int64) tmp105 = tmp0 < tmp104 tmp106 = tmp103 & tmp105 tmp107 = tl.load(in_ptr17 + (r2 + 128 * x1), tmp106 & xmask, eviction_policy='evict_last', other=0.0) tmp108 = tl.full([1, 1], 19, tl.int64) tmp109 = tmp0 >= tmp108 tmp110 = tmp0 < tmp102 tmp111 = tmp109 & tmp110 tmp112 = tl.load(in_ptr18 + (r2 + 128 * x1), tmp111 & xmask, eviction_policy='evict_last', other=0.0) tmp113 = tl.full([1, 1], 18, tl.int64) tmp114 = tmp0 >= tmp113 tmp115 = tmp0 < tmp108 tmp116 = tmp114 & tmp115 tmp117 = tl.load(in_ptr19 + (r2 + 128 * x1), tmp116 & xmask, eviction_policy='evict_last', other=0.0) tmp118 = tmp0 >= tmp80 tmp119 = tmp0 < tmp113 tmp120 = tmp118 & tmp119 tmp121 = tl.load(in_ptr20 + (r2 + 128 * x1), tmp120 & xmask, eviction_policy='evict_last', other=0.0) tmp122 = tl.where(tmp120, tmp121, tmp101) tmp123 = tl.where(tmp116, tmp117, tmp122) tmp124 = tl.where(tmp111, tmp112, tmp123) tmp125 = tl.where(tmp106, tmp107, tmp124) tmp126 = tl.full([1, 1], 24, tl.int64) tmp127 = tmp0 >= tmp126 tmp128 = tl.full([1, 1], 25, tl.int64) tmp129 = tmp0 < tmp128 tmp130 = tmp127 & tmp129 tmp131 = tl.load(in_ptr21 + (r2 + 128 * x1), tmp130 & xmask, eviction_policy='evict_last', other=0.0) tmp132 = tl.full([1, 1], 23, tl.int64) tmp133 = tmp0 >= tmp132 tmp134 = tmp0 < tmp126 tmp135 = tmp133 & tmp134 tmp136 = tl.load(in_ptr22 + (r2 + 128 * x1), tmp135 & xmask, eviction_policy='evict_last', other=0.0) tmp137 = tl.full([1, 1], 22, tl.int64) tmp138 = tmp0 >= tmp137 tmp139 = tmp0 < tmp132 tmp140 = tmp138 & tmp139 tmp141 = tl.load(in_ptr23 + (r2 + 128 * x1), tmp140 & xmask, eviction_policy='evict_last', other=0.0) tmp142 = tmp0 >= tmp104 tmp143 = tmp0 < tmp137 tmp144 = tmp142 & tmp143 tmp145 = tl.load(in_ptr24 + (r2 + 128 * x1), tmp144 & xmask, eviction_policy='evict_last', other=0.0) tmp146 = tl.where(tmp144, tmp145, tmp125) tmp147 = tl.where(tmp140, tmp141, tmp146) tmp148 = tl.where(tmp135, tmp136, tmp147) tmp149 = tl.where(tmp130, tmp131, tmp148) tmp150 = tl.full([1, 1], 28, tl.int64) tmp151 = tmp0 >= tmp150 tmp152 = tl.full([1, 1], 29, tl.int64) tmp153 = tmp0 < tmp152 tmp154 = tmp151 & tmp153 tmp155 = tl.load(in_ptr25 + (r2 + 128 * x1), tmp154 & xmask, eviction_policy='evict_last', other=0.0) tmp156 = tl.full([1, 1], 27, tl.int64) tmp157 = tmp0 >= tmp156 tmp158 = tmp0 < tmp150 tmp159 = tmp157 & tmp158 tmp160 = tl.load(in_ptr26 + (r2 + 128 * x1), tmp159 & xmask, eviction_policy='evict_last', other=0.0) tmp161 = tl.full([1, 1], 26, tl.int64) tmp162 = tmp0 >= tmp161 tmp163 = tmp0 < tmp156 tmp164 = tmp162 & tmp163 tmp165 = tl.load(in_ptr27 + (r2 + 128 * x1), tmp164 & xmask, 
eviction_policy='evict_last', other=0.0) tmp166 = tmp0 >= tmp128 tmp167 = tmp0 < tmp161 tmp168 = tmp166 & tmp167 tmp169 = tl.load(in_ptr28 + (r2 + 128 * x1), tmp168 & xmask, eviction_policy='evict_last', other=0.0) tmp170 = tl.where(tmp168, tmp169, tmp149) tmp171 = tl.where(tmp164, tmp165, tmp170) tmp172 = tl.where(tmp159, tmp160, tmp171) tmp173 = tl.where(tmp154, tmp155, tmp172) tmp174 = tl.full([1, 1], 32, tl.int64) tmp175 = tmp0 >= tmp174 tmp176 = tl.full([1, 1], 33, tl.int64) tmp177 = tmp0 < tmp176 tmp178 = tmp175 & tmp177 tmp179 = tl.load(in_ptr29 + (r2 + 128 * x1), tmp178 & xmask, eviction_policy='evict_last', other=0.0) tmp180 = tl.full([1, 1], 31, tl.int64) tmp181 = tmp0 >= tmp180 tmp182 = tmp0 < tmp174 tmp183 = tmp181 & tmp182 tmp184 = tl.load(in_ptr30 + (r2 + 128 * x1), tmp183 & xmask, eviction_policy='evict_last', other=0.0) tmp185 = tl.full([1, 1], 30, tl.int64) tmp186 = tmp0 >= tmp185 tmp187 = tmp0 < tmp180 tmp188 = tmp186 & tmp187 tmp189 = tl.load(in_ptr31 + (r2 + 128 * x1), tmp188 & xmask, eviction_policy='evict_last', other=0.0) tmp190 = tmp0 >= tmp152 tmp191 = tmp0 < tmp185 tmp192 = tmp190 & tmp191 tmp193 = tl.load(in_ptr32 + (r2 + 128 * x1), tmp192 & xmask, eviction_policy='evict_last', other=0.0) tmp194 = tl.where(tmp192, tmp193, tmp173) tmp195 = tl.where(tmp188, tmp189, tmp194) tmp196 = tl.where(tmp183, tmp184, tmp195) tmp197 = tl.where(tmp178, tmp179, tmp196) tmp198 = tl.full([1, 1], 36, tl.int64) tmp199 = tmp0 >= tmp198 tmp200 = tl.full([1, 1], 37, tl.int64) tmp201 = tmp0 < tmp200 tmp202 = tmp199 & tmp201 tmp203 = tl.load(in_ptr33 + (r2 + 128 * x1), tmp202 & xmask, eviction_policy='evict_last', other=0.0) tmp204 = tl.full([1, 1], 35, tl.int64) tmp205 = tmp0 >= tmp204 tmp206 = tmp0 < tmp198 tmp207 = tmp205 & tmp206 tmp208 = tl.load(in_ptr34 + (r2 + 128 * x1), tmp207 & xmask, eviction_policy='evict_last', other=0.0) tmp209 = tl.full([1, 1], 34, tl.int64) tmp210 = tmp0 >= tmp209 tmp211 = tmp0 < tmp204 tmp212 = tmp210 & tmp211 tmp213 = tl.load(in_ptr35 + (r2 + 128 * x1), tmp212 & xmask, eviction_policy='evict_last', other=0.0) tmp214 = tmp0 >= tmp176 tmp215 = tmp0 < tmp209 tmp216 = tmp214 & tmp215 tmp217 = tl.load(in_ptr36 + (r2 + 128 * x1), tmp216 & xmask, eviction_policy='evict_last', other=0.0) tmp218 = tl.where(tmp216, tmp217, tmp197) tmp219 = tl.where(tmp212, tmp213, tmp218) tmp220 = tl.where(tmp207, tmp208, tmp219) tmp221 = tl.where(tmp202, tmp203, tmp220) tmp222 = tl.full([1, 1], 40, tl.int64) tmp223 = tmp0 >= tmp222 tmp224 = tl.full([1, 1], 41, tl.int64) tmp225 = tmp0 < tmp224 tmp226 = tmp223 & tmp225 tmp227 = tl.load(in_ptr37 + (r2 + 128 * x1), tmp226 & xmask, eviction_policy='evict_last', other=0.0) tmp228 = tl.full([1, 1], 39, tl.int64) tmp229 = tmp0 >= tmp228 tmp230 = tmp0 < tmp222 tmp231 = tmp229 & tmp230 tmp232 = tl.load(in_ptr38 + (r2 + 128 * x1), tmp231 & xmask, eviction_policy='evict_last', other=0.0) tmp233 = tl.full([1, 1], 38, tl.int64) tmp234 = tmp0 >= tmp233 tmp235 = tmp0 < tmp228 tmp236 = tmp234 & tmp235 tmp237 = tl.load(in_ptr39 + (r2 + 128 * x1), tmp236 & xmask, eviction_policy='evict_last', other=0.0) tmp238 = tmp0 >= tmp200 tmp239 = tmp0 < tmp233 tmp240 = tmp238 & tmp239 tmp241 = tl.load(in_ptr40 + (r2 + 128 * x1), tmp240 & xmask, eviction_policy='evict_last', other=0.0) tmp242 = tl.where(tmp240, tmp241, tmp221) tmp243 = tl.where(tmp236, tmp237, tmp242) tmp244 = tl.where(tmp231, tmp232, tmp243) tmp245 = tl.where(tmp226, tmp227, tmp244) tmp246 = tl.full([1, 1], 44, tl.int64) tmp247 = tmp0 >= tmp246 tmp248 = tl.full([1, 1], 45, tl.int64) tmp249 = 
tmp0 < tmp248 tmp250 = tmp247 & tmp249 tmp251 = tl.load(in_ptr41 + (r2 + 128 * x1), tmp250 & xmask, eviction_policy='evict_last', other=0.0) tmp252 = tl.full([1, 1], 43, tl.int64) tmp253 = tmp0 >= tmp252 tmp254 = tmp0 < tmp246 tmp255 = tmp253 & tmp254 tmp256 = tl.load(in_ptr42 + (r2 + 128 * x1), tmp255 & xmask, eviction_policy='evict_last', other=0.0) tmp257 = tl.full([1, 1], 42, tl.int64) tmp258 = tmp0 >= tmp257 tmp259 = tmp0 < tmp252 tmp260 = tmp258 & tmp259 tmp261 = tl.load(in_ptr43 + (r2 + 128 * x1), tmp260 & xmask, eviction_policy='evict_last', other=0.0) tmp262 = tmp0 >= tmp224 tmp263 = tmp0 < tmp257 tmp264 = tmp262 & tmp263 tmp265 = tl.load(in_ptr44 + (r2 + 128 * x1), tmp264 & xmask, eviction_policy='evict_last', other=0.0) tmp266 = tl.where(tmp264, tmp265, tmp245) tmp267 = tl.where(tmp260, tmp261, tmp266) tmp268 = tl.where(tmp255, tmp256, tmp267) tmp269 = tl.where(tmp250, tmp251, tmp268) tmp270 = tl.full([1, 1], 48, tl.int64) tmp271 = tmp0 >= tmp270 tmp272 = tl.full([1, 1], 49, tl.int64) tmp273 = tmp0 < tmp272 tmp274 = tmp271 & tmp273 tmp275 = tl.load(in_ptr45 + (r2 + 128 * x1), tmp274 & xmask, eviction_policy='evict_last', other=0.0) tmp276 = tl.full([1, 1], 47, tl.int64) tmp277 = tmp0 >= tmp276 tmp278 = tmp0 < tmp270 tmp279 = tmp277 & tmp278 tmp280 = tl.load(in_ptr46 + (r2 + 128 * x1), tmp279 & xmask, eviction_policy='evict_last', other=0.0) tmp281 = tl.full([1, 1], 46, tl.int64) tmp282 = tmp0 >= tmp281 tmp283 = tmp0 < tmp276 tmp284 = tmp282 & tmp283 tmp285 = tl.load(in_ptr47 + (r2 + 128 * x1), tmp284 & xmask, eviction_policy='evict_last', other=0.0) tmp286 = tmp0 >= tmp248 tmp287 = tmp0 < tmp281 tmp288 = tmp286 & tmp287 tmp289 = tl.load(in_ptr48 + (r2 + 128 * x1), tmp288 & xmask, eviction_policy='evict_last', other=0.0) tmp290 = tl.where(tmp288, tmp289, tmp269) tmp291 = tl.where(tmp284, tmp285, tmp290) tmp292 = tl.where(tmp279, tmp280, tmp291) tmp293 = tl.where(tmp274, tmp275, tmp292) tmp294 = tl.full([1, 1], 52, tl.int64) tmp295 = tmp0 >= tmp294 tmp296 = tl.full([1, 1], 53, tl.int64) tmp297 = tmp0 < tmp296 tmp298 = tmp295 & tmp297 tmp299 = tl.load(in_ptr49 + (r2 + 128 * x1), tmp298 & xmask, eviction_policy='evict_last', other=0.0) tmp300 = tl.full([1, 1], 51, tl.int64) tmp301 = tmp0 >= tmp300 tmp302 = tmp0 < tmp294 tmp303 = tmp301 & tmp302 tmp304 = tl.load(in_ptr50 + (r2 + 128 * x1), tmp303 & xmask, eviction_policy='evict_last', other=0.0) tmp305 = tl.full([1, 1], 50, tl.int64) tmp306 = tmp0 >= tmp305 tmp307 = tmp0 < tmp300 tmp308 = tmp306 & tmp307 tmp309 = tl.load(in_ptr51 + (r2 + 128 * x1), tmp308 & xmask, eviction_policy='evict_last', other=0.0) tmp310 = tmp0 >= tmp272 tmp311 = tmp0 < tmp305 tmp312 = tmp310 & tmp311 tmp313 = tl.load(in_ptr52 + (r2 + 128 * x1), tmp312 & xmask, eviction_policy='evict_last', other=0.0) tmp314 = tl.where(tmp312, tmp313, tmp293) tmp315 = tl.where(tmp308, tmp309, tmp314) tmp316 = tl.where(tmp303, tmp304, tmp315) tmp317 = tl.where(tmp298, tmp299, tmp316) tmp318 = tl.full([1, 1], 56, tl.int64) tmp319 = tmp0 >= tmp318 tmp320 = tl.full([1, 1], 57, tl.int64) tmp321 = tmp0 < tmp320 tmp322 = tmp319 & tmp321 tmp323 = tl.load(in_ptr53 + (r2 + 128 * x1), tmp322 & xmask, eviction_policy='evict_last', other=0.0) tmp324 = tl.full([1, 1], 55, tl.int64) tmp325 = tmp0 >= tmp324 tmp326 = tmp0 < tmp318 tmp327 = tmp325 & tmp326 tmp328 = tl.load(in_ptr54 + (r2 + 128 * x1), tmp327 & xmask, eviction_policy='evict_last', other=0.0) tmp329 = tl.full([1, 1], 54, tl.int64) tmp330 = tmp0 >= tmp329 tmp331 = tmp0 < tmp324 tmp332 = tmp330 & tmp331 tmp333 = tl.load(in_ptr55 + 
(r2 + 128 * x1), tmp332 & xmask, eviction_policy='evict_last', other=0.0)
    tmp334 = tmp0 >= tmp296
    tmp335 = tmp0 < tmp329
    tmp336 = tmp334 & tmp335
    tmp337 = tl.load(in_ptr56 + (r2 + 128 * x1), tmp336 & xmask, eviction_policy='evict_last', other=0.0)
    tmp338 = tl.where(tmp336, tmp337, tmp317)
    tmp339 = tl.where(tmp332, tmp333, tmp338)
    tmp340 = tl.where(tmp327, tmp328, tmp339)
    tmp341 = tl.where(tmp322, tmp323, tmp340)
    tmp342 = tl.full([1, 1], 60, tl.int64)
    tmp343 = tmp0 >= tmp342
    tmp344 = tl.full([1, 1], 61, tl.int64)
    tmp345 = tmp0 < tmp344
    tmp346 = tmp343 & tmp345
    tmp347 = tl.load(in_ptr57 + (r2 + 128 * x1), tmp346 & xmask, eviction_policy='evict_last', other=0.0)
    tmp348 = tl.full([1, 1], 59, tl.int64)
    tmp349 = tmp0 >= tmp348
    tmp350 = tmp0 < tmp342
    tmp351 = tmp349 & tmp350
    tmp352 = tl.load(in_ptr58 + (r2 + 128 * x1), tmp351 & xmask, eviction_policy='evict_last', other=0.0)
    tmp353 = tl.full([1, 1], 58, tl.int64)
    tmp354 = tmp0 >= tmp353
    tmp355 = tmp0 < tmp348
    tmp356 = tmp354 & tmp355
    tmp357 = tl.load(in_ptr59 + (r2 + 128 * x1), tmp356 & xmask, eviction_policy='evict_last', other=0.0)
    tmp358 = tmp0 >= tmp320
    tmp359 = tmp0 < tmp353
    tmp360 = tmp358 & tmp359
    tmp361 = tl.load(in_ptr60 + (r2 + 128 * x1), tmp360 & xmask, eviction_policy='evict_last', other=0.0)
    tmp362 = tl.where(tmp360, tmp361, tmp341)
    tmp363 = tl.where(tmp356, tmp357, tmp362)
    tmp364 = tl.where(tmp351, tmp352, tmp363)
    tmp365 = tl.where(tmp346, tmp347, tmp364)
    tmp366 = tl.full([1, 1], 63, tl.int64)
    tmp367 = tmp0 >= tmp366
    tmp368 = tl.load(in_ptr61 + (r2 + 128 * x1), tmp367 & xmask, eviction_policy='evict_last', other=0.0)
    tmp369 = tl.full([1, 1], 62, tl.int64)
    tmp370 = tmp0 >= tmp369
    tmp371 = tmp0 < tmp366
    tmp372 = tmp370 & tmp371
    tmp373 = tl.load(in_ptr62 + (r2 + 128 * x1), tmp372 & xmask, eviction_policy='evict_last', other=0.0)
    tmp374 = tmp0 >= tmp344
    tmp375 = tmp0 < tmp369
    tmp376 = tmp374 & tmp375
    tmp377 = tl.load(in_ptr63 + (r2 + 128 * x1), tmp376 & xmask, eviction_policy='evict_last', other=0.0)
    tmp378 = tl.where(tmp376, tmp377, tmp365)
    tmp379 = tl.where(tmp372, tmp373, tmp378)
    tmp380 = tl.where(tmp367, tmp368, tmp379)
    # tmp380 is the per-cluster descriptor entry selected above; square it
    # and reduce over the 128 channels to get the intra-normalization norm.
    tmp381 = tmp380 * tmp380
    tmp382 = tl.broadcast_to(tmp381, [XBLOCK, RBLOCK])
    tmp384 = tl.where(xmask, tmp382, 0)
    tmp385 = tl.sum(tmp384, 1)[:, None]
    tmp386 = libdevice.sqrt(tmp385)
    tl.store(in_out_ptr0 + (r2 + 128 * x3), tmp380, xmask)
    tl.debug_barrier()
    tl.store(in_out_ptr1 + x3, tmp386, xmask)


# Final normalization: the first pass divides each entry by its cluster's
# intra-norm and accumulates squares to obtain the global L2 norm of the
# flattened (64 x 128) descriptor; the second pass writes the doubly
# normalized result.
@triton.jit
def triton_red_fused_div_linalg_vector_norm_7(in_out_ptr0, in_ptr0,
    in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.
    constexpr):
    xnumel = 4
    rnumel = 8192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x0 = xindex
    _tmp7 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp0 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp1 = tl.load(in_ptr1 + (64 * x0 + r1 // 128), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp2 = 1e-12
        tmp3 = triton_helpers.maximum(tmp1, tmp2)
        tmp4 = tmp0 / tmp3
        tmp5 = tmp4 * tmp4
        tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
        tmp8 = _tmp7 + tmp6
        _tmp7 = tl.where(rmask & xmask, tmp8, _tmp7)
    tmp7 = tl.sum(_tmp7, 1)[:, None]
    tmp9 = libdevice.sqrt(tmp7)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp9, xmask)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r1 = rindex
        tmp10 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp11 = tl.load(in_ptr1 + (64 * x0 + r1 // 128), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp12 = 1e-12
        tmp13 = triton_helpers.maximum(tmp11, tmp12)
        # Second pass: redo the intra-normalization, then divide by the
        # global L2 norm (tmp9) computed in the first pass.
        tmp14 = tmp10 / tmp13
        tmp15 = triton_helpers.maximum(tmp9, tmp12)
        tmp16 = tmp14 / tmp15
        tl.store(out_ptr0 + (r1 + 8192 * x0), tmp16, rmask & xmask)


# Host-side launcher: validates input layouts, allocates intermediates,
# and dispatches the fused kernels above in dependency order.
def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 128, 64, 64), (524288, 4096, 64, 1))
    assert_size_stride(primals_2, (64, 128, 1, 1), (128, 1, 1, 1))
    assert_size_stride(primals_3, (64, 128), (128, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32)
        get_raw_stream(0)
        triton_red_fused_linalg_vector_norm_0[grid(16384)](primals_1,
            buf0, 16384, 128, XBLOCK=64, RBLOCK=4, num_warps=8, num_stages=1)
        buf1 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32)
        # One (4, 1, 128, 4096) scratch buffer per cluster residual; the
        # fused div/sub kernel launched below fills them in a single pass.
        buf6 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf8 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf10 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf12 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf15 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf17 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf19 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf21 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf24 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf26 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf28 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf30 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf33 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf35 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf37 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf39 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf42 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf44 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf46 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf48 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf51 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf53 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf55 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf57 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf60 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf62 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf64 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf66 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32)
        buf69 = empty_strided_cuda((4, 1, 128,
4096), (524288, 2097152, 4096, 1), torch.float32) buf71 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf73 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf75 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf78 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf80 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf82 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf84 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf87 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf89 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf91 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf93 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf96 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf98 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf100 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf102 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf105 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf107 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf109 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf111 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf114 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf116 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf118 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf120 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf123 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf125 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf127 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf129 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf132 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf134 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf136 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf138 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf141 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf143 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf145 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) triton_poi_fused_div_sub_1[grid(2097152)](primals_1, buf0, primals_3, buf1, buf6, buf8, buf10, buf12, buf15, buf17, buf19, buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39, buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60, buf62, buf64, buf66, buf69, buf71, buf73, buf75, buf78, buf80, buf82, buf84, buf87, buf89, buf91, buf93, buf96, buf98, buf100, buf102, buf105, buf107, buf109, buf111, buf114, buf116, buf118, buf120, buf123, 
buf125, buf127, buf129, buf132, buf134, buf136, buf138, buf141, buf143, buf145, 2097152, XBLOCK=512, num_warps= 8, num_stages=1) del primals_1 buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = reinterpret_tensor(buf0, (4, 1, 4096), (4096, 4096, 1), 0) del buf0 buf4 = empty_strided_cuda((4, 1, 4096), (4096, 4096, 1), torch.float32) triton_per_fused__softmax_2[grid(16384)](buf2, buf3, buf4, 16384, 64, XBLOCK=8, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf7 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf9 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf11 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf13 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf16 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf18 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf20 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf22 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf25 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf27 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf29 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf31 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf34 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf36 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf38 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf40 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf43 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf45 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf47 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf49 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf52 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf54 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf56 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf58 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf61 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf63 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf65 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf67 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) triton_red_fused_mul_sub_sum_3[grid(512)](buf1, primals_3, buf2, buf3, buf4, buf6, buf8, buf10, buf12, buf15, buf17, buf19, buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39, buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60, buf62, buf64, buf66, buf5, buf7, buf9, buf11, buf13, buf16, buf18, buf20, buf22, buf25, buf27, buf29, buf31, buf34, buf36, buf38, buf40, buf43, buf45, buf47, buf49, buf52, buf54, buf56, buf58, buf61, buf63, buf65, buf67, 512, 4096, XBLOCK=1, RBLOCK= 1024, num_warps=16, num_stages=1) buf70 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf72 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf74 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf76 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf79 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf81 = 
empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf83 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf85 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf88 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf90 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf92 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf94 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf97 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf99 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf101 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf103 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf106 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf108 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf110 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf112 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf115 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf117 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf119 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf121 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf124 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf126 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf128 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf130 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) triton_red_fused_mul_sum_4[grid(512)](buf69, buf2, buf3, buf4, buf71, buf73, buf75, buf78, buf80, buf82, buf84, buf87, buf89, buf91, buf93, buf96, buf98, buf100, buf102, buf105, buf107, buf109, buf111, buf114, buf116, buf118, buf120, buf123, buf125, buf127, buf129, buf70, buf72, buf74, buf76, buf79, buf81, buf83, buf85, buf88, buf90, buf92, buf94, buf97, buf99, buf101, buf103, buf106, buf108, buf110, buf112, buf115, buf117, buf119, buf121, buf124, buf126, buf128, buf130, 512, 4096, XBLOCK=1, RBLOCK= 1024, num_warps=16, num_stages=1) buf133 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf135 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf137 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf139 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf142 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf144 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf146 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) triton_red_fused_mul_sum_5[grid(512)](buf132, buf2, buf3, buf4, buf134, buf136, buf138, buf141, buf143, buf145, buf133, buf135, buf137, buf139, buf142, buf144, buf146, 512, 4096, XBLOCK=1, RBLOCK=1024, num_warps=16, num_stages=1) buf14 = empty_strided_cuda((4, 64, 128), (8192, 128, 1), torch.float32) buf23 = buf14 del buf14 buf32 = buf23 del buf23 buf41 = buf32 del buf32 buf50 = buf41 del buf41 buf59 = buf50 del buf50 buf68 = buf59 del buf59 buf77 = buf68 del buf68 buf86 = buf77 del buf77 buf95 = buf86 del buf86 buf104 = buf95 del buf95 buf113 = buf104 del buf104 buf122 = buf113 del buf113 buf131 = buf122 del buf122 buf140 = buf131 del buf131 buf147 = buf140 del buf140 buf148 = empty_strided_cuda((4, 64, 1), (64, 1, 256), torch.float32) buf149 = reinterpret_tensor(buf148, (4, 64, 1), (64, 1, 1), 0) del buf148 triton_per_fused_copy_linalg_vector_norm_zeros_6[grid(256)](buf147, buf149, 
buf13, buf11, buf9, buf7, buf5, buf22, buf20, buf18, buf16, buf31, buf29, buf27, buf25, buf40, buf38, buf36, buf34, buf49, buf47, buf45, buf43, buf58, buf56, buf54, buf52, buf67, buf65, buf63, buf61, buf76, buf74, buf72, buf70, buf85, buf83, buf81, buf79, buf94, buf92, buf90, buf88, buf103, buf101, buf99, buf97, buf112, buf110, buf108, buf106, buf121, buf119, buf117, buf115, buf130, buf128, buf126, buf124, buf139, buf137, buf135, buf133, buf146, buf144, buf142, 256, 128, XBLOCK=1, num_warps=2, num_stages=1) del buf101 del buf103 del buf106 del buf108 del buf11 del buf110 del buf112 del buf115 del buf117 del buf119 del buf121 del buf124 del buf126 del buf128 del buf13 del buf130 del buf133 del buf135 del buf137 del buf139 del buf142 del buf144 del buf146 del buf16 del buf18 del buf20 del buf22 del buf25 del buf27 del buf29 del buf31 del buf34 del buf36 del buf38 del buf40 del buf43 del buf45 del buf47 del buf49 del buf5 del buf52 del buf54 del buf56 del buf58 del buf61 del buf63 del buf65 del buf67 del buf7 del buf70 del buf72 del buf74 del buf76 del buf79 del buf81 del buf83 del buf85 del buf88 del buf9 del buf90 del buf92 del buf94 del buf97 del buf99 buf150 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf151 = reinterpret_tensor(buf150, (4, 1), (1, 1), 0) del buf150 buf152 = empty_strided_cuda((4, 8192), (8192, 1), torch.float32) triton_red_fused_div_linalg_vector_norm_7[grid(4)](buf151, buf147, buf149, buf152, 4, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) return (buf152, primals_2, buf1, buf2, buf3, buf4, reinterpret_tensor( primals_3, (1, 128), (128, 1), 0), buf6, buf8, buf10, buf12, buf15, buf17, buf19, buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39, buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60, buf62, buf64, buf66, buf69, buf71, buf73, buf75, buf78, buf80, buf82, buf84, buf87, buf89, buf91, buf93, buf96, buf98, buf100, buf102, buf105, buf107, buf109, buf111, buf114, buf116, buf118, buf120, buf123, buf125, buf127, buf129, buf132, buf134, buf136, buf138, buf141, buf143, buf145, buf147, buf149, buf151) class NetVLADNew(nn.Module): """NetVLAD layer implementation""" def __init__(self, num_clusters=64, dim=128, normalize_input=True, vladv2=False, use_faiss=True): """ Args: num_clusters : int The number of clusters dim : int Dimension of descriptors normalize_input : bool If true, descriptor-wise L2 normalization is applied to input. 
            vladv2 : bool
                If true, use vladv2 otherwise use vladv1
        """
        super().__init__()
        self.num_clusters = num_clusters
        self.dim = dim
        self.alpha = 0
        self.vladv2 = vladv2
        self.normalize_input = normalize_input
        self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=vladv2)
        self.centroids = nn.Parameter(torch.rand(num_clusters, dim))
        self.use_faiss = use_faiss

    def init_params(self, clsts, traindescs):
        if not self.vladv2:
            clstsAssign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True)
            dots = np.dot(clstsAssign, traindescs.T)
            dots.sort(0)
            dots = dots[::-1, :]
            self.alpha = (-np.log(0.01) / np.mean(dots[0, :] - dots[1, :])).item()
            self.centroids = nn.Parameter(torch.from_numpy(clsts))
            self.conv.weight = nn.Parameter(torch.from_numpy(self.alpha *
                clstsAssign).unsqueeze(2).unsqueeze(3))
            self.conv.bias = None
        else:
            if not self.use_faiss:
                knn = NearestNeighbors(n_jobs=-1)
                knn.fit(traindescs)
                del traindescs
                ds_sq = np.square(knn.kneighbors(clsts, 2)[1])
                del knn
            else:
                index = faiss.IndexFlatL2(traindescs.shape[1])
                index.add(traindescs)
                del traindescs
                ds_sq = np.square(index.search(clsts, 2)[1])
                del index
            self.alpha = (-np.log(0.01) / np.mean(ds_sq[:, 1] - ds_sq[:, 0])).item()
            self.centroids = nn.Parameter(torch.from_numpy(clsts))
            del clsts, ds_sq
            self.conv.weight = nn.Parameter((2.0 * self.alpha *
                self.centroids).unsqueeze(-1).unsqueeze(-1))
            self.conv.bias = nn.Parameter(-self.alpha * self.centroids.norm(dim=1))

    def forward(self, input_0):
        primals_3 = self.centroids
        primals_2 = self.conv.weight
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
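The fused kernels in this record implement the usual NetVLAD aggregation: descriptor-wise L2 normalization, soft assignment via a 1x1 convolution followed by a softmax over clusters, assignment-weighted residuals to the centroids, intra-normalization per cluster, and a final global L2 normalization. Below is a minimal eager sketch of that pipeline under the shapes asserted in `call` (N=4, D=128, K=64, H=W=64) and the vladv1 path, where the convolution has no bias; `netvlad_reference` is an illustrative name, not part of the repository.

import torch
import torch.nn.functional as F

def netvlad_reference(x, conv_weight, centroids):
    # x: (N, D, H, W), conv_weight: (K, D, 1, 1), centroids: (K, D)
    N, D = x.shape[:2]
    K = centroids.shape[0]
    x = F.normalize(x, p=2, dim=1)                      # per-descriptor L2 norm
    soft_assign = F.conv2d(x, conv_weight)              # (N, K, H, W), no bias (vladv1)
    soft_assign = F.softmax(soft_assign.view(N, K, -1), dim=1)
    x_flat = x.view(N, D, -1)                           # (N, D, H*W)
    vlad = x.new_zeros(N, K, D)
    for k in range(K):                                  # the compiled graph unrolls this loop
        residual = x_flat - centroids[k].view(1, D, 1)  # residual to centroid k
        vlad[:, k] = (residual * soft_assign[:, k:k + 1]).sum(dim=-1)
    vlad = F.normalize(vlad, p=2, dim=2)                # intra-normalization per cluster
    return F.normalize(vlad.view(N, -1), p=2, dim=1)    # flatten, global L2 norm

The unrolled per-cluster sums are what show up above as the long chains of `buf` slices fed to `triton_red_fused_mul_sub_sum_3` and its siblings, and `triton_red_fused_div_linalg_vector_norm_7` fuses the two final normalizations.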
carson-sky/Patch-NetVLAD
NetVLAD
false
15,119
[ "MIT" ]
278
7b913626b34dbbe250d6921a6a093512ee513eac
https://github.com/carson-sky/Patch-NetVLAD/tree/7b913626b34dbbe250d6921a6a093512ee513eac
SingleSP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/z4/cz4bbbxtsc2y2dfkmubjzhzxlsr4puhwt2tn6rpccqlyjjxxn4qz.py # Topologically Sorted Source Nodes: [dot, tanh], Original ATen: [aten.add, aten.tanh] # Source node to ATen node mapping: # dot => add # tanh => tanh # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, %primals_4), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + 
(x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x3), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = libdevice.tanh(tmp4) tl.store(out_ptr0 + (x3), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ts/ctscnzvbagjv4t25zui245b3recij5udu7nvujnr5rixcyo7elc6.py # Topologically Sorted Source Nodes: [weight], Original ATen: [aten._softmax] # Source node to ATen node mapping: # weight => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py # Topologically Sorted Source Nodes: [weight], Original ATen: [aten._softmax] # Source node to ATen node mapping: # weight => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] 
= call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1, ), (1, )) assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (1, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dot, tanh], Original ATen: [aten.add, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_add_tanh_0.run(buf0, primals_2, primals_4, buf1, 64, grid=grid(64), stream=stream0) del buf0 del primals_2 del primals_4 buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] 
extern_kernels.addmm(primals_6, reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_6 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [weight], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf3, buf4, 16, grid=grid(16), stream=stream0) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [weight], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf4, buf5, 16, grid=grid(16), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf5, (4, 1, 4), (4, 0, 1), 0), primals_7, out=buf6) del buf5 return (reinterpret_tensor(buf6, (4, 4), (4, 1), 0), reinterpret_tensor(primals_3, (1, 4), (4, 1), 0), buf1, buf3, reinterpret_tensor(primals_7, (4, 4, 4), (16, 1, 4), 0), primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
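`triton_poi_fused__softmax_1` and `triton_poi_fused__softmax_2` above split the row-wise softmax into two elementwise passes: the first subtracts the row maximum before exponentiating, which keeps `exp` from overflowing, and the second divides by the row sum. The same decomposition in plain PyTorch, as a sketch for illustration:

import torch

def softmax_two_pass(scores):
    # Pass 1: subtract the row-wise max, then exponentiate
    # (numerically stable; matches triton_poi_fused__softmax_1).
    exp = (scores - scores.max(dim=1, keepdim=True).values).exp()
    # Pass 2: normalize by the row sum (matches triton_poi_fused__softmax_2).
    return exp / exp.sum(dim=1, keepdim=True)

scores = torch.randn(4, 4)
assert torch.allclose(softmax_two_pass(scores), torch.softmax(scores, dim=1))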
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
from torch.autograd import *
import torch.nn.functional as F


class SingleSP(nn.Module):

    def __init__(self, opt):
        super(SingleSP, self).__init__()
        self.rnn_size = opt.rnn_size
        self.att_hid_size = opt.att_hid_size
        self.wh = nn.Linear(self.rnn_size, self.att_hid_size)
        self.wa = nn.Linear(self.att_hid_size, 1)

    def forward(self, h, roi_feats, p_roi_feats, att_masks=None):
        dot = self.wh(h).unsqueeze(1).expand_as(p_roi_feats) + p_roi_feats
        weight = F.softmax(self.wa(torch.tanh(dot)).squeeze(2), dim=1)
        if att_masks is not None:
            weight = weight * att_masks
            weight = weight / weight.sum(1, keepdim=True)
        single_feat = torch.bmm(weight.unsqueeze(1), roi_feats).squeeze(1)
        return single_feat


def get_inputs():
    return [torch.rand([4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'opt': _mock_config(rnn_size=4, att_hid_size=4)}]
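`_mock_config` comes from paritybench's `_paritybench_helpers`; for a quick standalone check of the eager module, any attribute container exposing `rnn_size` and `att_hid_size` works. A sketch using `types.SimpleNamespace` in its place:

from types import SimpleNamespace
import torch

opt = SimpleNamespace(rnn_size=4, att_hid_size=4)
model = SingleSP(opt)
h = torch.rand(4)                  # hidden state, shaped as in get_inputs()
roi_feats = torch.rand(4, 4, 4)
p_roi_feats = torch.rand(4, 4, 4)
out = model(h, roi_feats, p_roi_feats)
print(out.shape)  # torch.Size([4, 4]): one pooled feature per batch element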
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn from torch.autograd import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = libdevice.tanh(tmp4) tl.store(out_ptr0 + x3, tmp5, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1,), (1,)) assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (1, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_tanh_0[grid(64)](buf0, primals_2, primals_4, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del primals_2 del primals_4 buf3 = 
empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf1, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_6 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_2[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0) del buf4 extern_kernels.bmm(reinterpret_tensor(buf5, (4, 1, 4), (4, 0, 1), 0 ), primals_7, out=buf6) del buf5 return reinterpret_tensor(buf6, (4, 4), (4, 1), 0), reinterpret_tensor( primals_3, (1, 4), (4, 1), 0), buf1, buf3, reinterpret_tensor(primals_7 , (4, 4, 4), (16, 1, 4), 0), primals_5 class SingleSPNew(nn.Module): def __init__(self, opt): super(SingleSPNew, self).__init__() self.rnn_size = opt.rnn_size self.att_hid_size = opt.att_hid_size self.wh = nn.Linear(self.rnn_size, self.att_hid_size) self.wa = nn.Linear(self.att_hid_size, 1) def forward(self, input_0, input_1, input_2): primals_1 = self.wh.weight primals_2 = self.wh.bias primals_5 = self.wa.weight primals_6 = self.wa.bias primals_3 = input_0 primals_4 = input_1 primals_7 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
daqingliu/CAVP
SingleSP
false
15,120
[ "MIT" ]
49
d383affde78dbc75e369095c27954dcdd79478d0
https://github.com/daqingliu/CAVP/tree/d383affde78dbc75e369095c27954dcdd79478d0
FLogSigmoid
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/rx/crxngcmdawy753uc5ara73bawc5jjsos3ttdlxnlwzuxueegsuj4.py # Topologically Sorted Source Nodes: [log_sigmoid], Original ATen: [aten.log_sigmoid_forward] # Source node to ATen node mapping: # log_sigmoid => abs_1, exp, full_default, log1p, minimum, neg, sub # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg0_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) triton_poi_fused_log_sigmoid_forward_0 = async_compile.triton('triton_poi_fused_log_sigmoid_forward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_log_sigmoid_forward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_log_sigmoid_forward_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = triton_helpers.minimum(tmp1, tmp0) tmp3 = tl_math.abs(tmp0) tmp4 = -tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = libdevice.log1p(tmp5) tmp7 = tmp2 - tmp6 tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_sigmoid], Original ATen: [aten.log_sigmoid_forward] stream0 = get_raw_stream(0) triton_poi_fused_log_sigmoid_forward_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class FLogSigmoid(nn.Module):
    """
    Test for nn.functional types
    """

    def __init__(self):
        super(FLogSigmoid, self).__init__()

    def forward(self, x):
        from torch.nn import functional as F
        return F.logsigmoid(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
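The fused kernel in this record evaluates log-sigmoid through the numerically stable identity log sigmoid(x) = min(0, x) - log1p(exp(-|x|)); the naive form -log(1 + exp(-x)) overflows for large negative x. A short eager check of the identity (the helper name is illustrative):

import torch
import torch.nn.functional as F

def logsigmoid_stable(x):
    # log(sigmoid(x)) = min(0, x) - log1p(exp(-|x|))
    return x.clamp(max=0) - torch.log1p(torch.exp(-x.abs()))

x = torch.randn(4, 4, 4, 4) * 50  # magnitudes that would overflow a naive exp(-x)
assert torch.allclose(logsigmoid_stable(x), F.logsigmoid(x))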
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_log_sigmoid_forward_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = triton_helpers.minimum(tmp1, tmp0)
    tmp3 = tl_math.abs(tmp0)
    tmp4 = -tmp3
    tmp5 = tl_math.exp(tmp4)
    tmp6 = libdevice.log1p(tmp5)
    tmp7 = tmp2 - tmp6
    tl.store(out_ptr0 + x0, tmp7, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_log_sigmoid_forward_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class FLogSigmoidNew(nn.Module):
    """
    Test for nn.functional types
    """

    def __init__(self):
        super(FLogSigmoidNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
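Assuming a CUDA device is available (the generated `call` pins device 0 and asserts a contiguous (4, 4, 4, 4) input), the compiled wrapper can be checked against the eager module defined earlier in this record; a sketch, not part of the repository:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
eager = FLogSigmoid()        # eager module from the python_code field above
compiled = FLogSigmoidNew()  # wrapper around the Triton kernel
assert torch.allclose(eager(x), compiled(x))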
dawnclaude/onnx2keras
FLogSigmoid
false
15,121
[ "MIT" ]
115
3d2a47c0a228b91fd434232274e216e491da36e3
https://github.com/dawnclaude/onnx2keras/tree/3d2a47c0a228b91fd434232274e216e491da36e3
SpatialAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/qi/cqinh332474qtv7bgen4bcfz2yfclns66jnudr7z7wmvlrgqoduc.py # Topologically Sorted Source Nodes: [targetT], Original ATen: [aten.clone, aten.transpose] # Source node to ATen node mapping: # targetT => clone # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) # %permute_5 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%clone, [0, 2, 1]), kwargs = {}) triton_poi_fused_clone_transpose_0 = async_compile.triton('triton_poi_fused_clone_transpose_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_transpose_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = 
tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex y2 = yindex % 4 y3 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x1 + (16*y0)), xmask & ymask) tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask) tl.store(out_ptr1 + (y2 + (4*x1) + (64*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/72/c72jfenzv3gdxfoy3igozkdtfrz2gb73xeusghpscoqm63hcwlhe.py # Topologically Sorted Source Nodes: [mul, attn_2, attn_3], Original ATen: [aten.mul, aten.add, aten._softmax] # Source node to ATen node mapping: # attn_2 => add # attn_3 => amax, exp, sub, sum_1 # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, -10000), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) triton_poi_fused__softmax_add_mul_1 = async_compile.triton('triton_poi_fused__softmax_add_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_mul_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*(x0 // 16)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (1 + (4*(x0 // 16))), xmask, eviction_policy='evict_last') tmp10 = 
tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (2 + (4*(x0 // 16))), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (3 + (4*(x0 // 16))), xmask, eviction_policy='evict_last') tmp2 = -10000.0 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp6 * tmp2 tmp8 = tmp5 + tmp7 tmp9 = triton_helpers.maximum(tmp4, tmp8) tmp12 = tmp11 * tmp2 tmp13 = tmp10 + tmp12 tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp17 = tmp16 * tmp2 tmp18 = tmp15 + tmp17 tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp20 = tmp4 - tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = tmp8 - tmp19 tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp19 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp18 - tmp19 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tl.store(out_ptr0 + (x0), tmp19, xmask) tl.store(out_ptr1 + (x0), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ru/cruro4e7x4uaqdi7p4qn5k4epseq5ksvrmsw4touw42mb2eqspec.py # Topologically Sorted Source Nodes: [attn_5], Original ATen: [aten.clone] # Source node to ATen node mapping: # attn_5 => clone_2 # Graph fragment: # %clone_2 : [num_users=3] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask) tmp1 = tl.load(in_ptr1 + (y3), ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + 
(x2 + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x2 + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp2 = -10000.0 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tl.store(out_ptr0 + (x2 + (16*y3)), tmp9, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 1), (16, 4, 1, 1)) buf1 = empty_strided_cuda((4, 16, 4), (64, 1, 16), torch.float32) buf7 = empty_strided_cuda((4, 4, 16), (64, 1, 4), torch.float32) # Topologically Sorted Source Nodes: [targetT], Original ATen: [aten.clone, aten.transpose] stream0 = get_raw_stream(0) triton_poi_fused_clone_transpose_0.run(primals_1, buf1, buf7, 16, 16, grid=grid(16, 16), stream=stream0) del primals_1 buf2 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [targetT, attn], Original ATen: [aten.clone, aten.bmm] extern_kernels.bmm(buf1, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf2) buf3 = empty_strided_cuda((64, 1), (1, 64), torch.float32) buf4 = empty_strided_cuda((64, 1), (1, 64), torch.float32) # Topologically Sorted Source Nodes: [mul, attn_2, attn_3], Original ATen: [aten.mul, aten.add, aten._softmax] triton_poi_fused__softmax_add_mul_1.run(buf2, primals_4, buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [attn_5], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf2, primals_4, buf3, buf4, buf5, 16, 16, grid=grid(16, 16), stream=stream0) del buf3 del buf4 buf6 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [weightedContext], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), buf5, out=buf6) return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_3, primals_4, reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), buf2, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf5, (4, 16, 4), (64, 1, 16), 0), buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from 
torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
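A note on the softmax split visible above: triton_poi_fused__softmax_add_mul_1 makes one pass computing each row's max and exp-sum over the masked logits, and triton_poi_fused_clone_2 then normalizes every entry, so the masked attention weight is, writing the masked score as a single symbol,

\[
\tilde{s}_{ij} = s_{ij} - 10000\, m_j, \qquad
a_{ij} = \frac{\exp\bigl(\tilde{s}_{ij} - \max_k \tilde{s}_{ik}\bigr)}{\sum_k \exp\bigl(\tilde{s}_{ik} - \max_l \tilde{s}_{il}\bigr)}.
\]

This is the standard numerically stable softmax; the -10000 scale comes straight from the mask term in the eager forward below.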
import torch
import torch.nn as nn
import torch.utils.data


class SpatialAttention(nn.Module):

    def __init__(self, input_dim, context_dim):
        super().__init__()
        self.conv_context = nn.Conv2d(context_dim, input_dim, 1, stride=1,
            padding=0, bias=False)
        self.sm = nn.Softmax(dim=1)

    def forward(self, input, context, mask):
        """
        input: batch x idf x ih x iw (queryL = ih x iw)
        context: batch x cdf x sourceL
        mask: batch x sourceL (a 1 marks a source position to suppress)
        """
        ih, iw = input.size(2), input.size(3)
        queryL = ih * iw
        batch_size, sourceL = context.size(0), context.size(2)
        target = input.view(batch_size, -1, queryL)
        targetT = torch.transpose(target, 1, 2).contiguous()
        sourceT = context.unsqueeze(3)
        sourceT = self.conv_context(sourceT).squeeze(3)
        attn = torch.bmm(targetT, sourceT)
        attn = attn.view(batch_size * queryL, sourceL)
        if mask is not None:
            mask = mask.unsqueeze(1).expand(-1, queryL, -1).contiguous().view(
                batch_size * queryL, -1)
            attn = attn + mask.float() * -10000
        attn = self.sm(attn)
        attn = attn.view(batch_size, queryL, sourceL)
        attn = torch.transpose(attn, 1, 2).contiguous()
        weightedContext = torch.bmm(sourceT, attn)
        weightedContext = weightedContext.view(batch_size, -1, ih, iw)
        attn = attn.view(batch_size, -1, ih, iw)
        return weightedContext, attn


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4, 'context_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex y2 = yindex % 4 y3 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x1 + 16 * y0), xmask & ymask) tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) tl.store(out_ptr1 + (y2 + 4 * x1 + 64 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_add_mul_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * (x0 // 16), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (1 + 4 * (x0 // 16)), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr1 + (2 + 4 * (x0 // 16)), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (3 + 4 * (x0 // 16)), xmask, eviction_policy= 'evict_last') tmp2 = -10000.0 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp6 * tmp2 tmp8 = tmp5 + tmp7 tmp9 = triton_helpers.maximum(tmp4, tmp8) tmp12 = tmp11 * tmp2 tmp13 = tmp10 + tmp12 tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp17 = tmp16 * tmp2 tmp18 = tmp15 + tmp17 tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp20 = tmp4 - tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = tmp8 - tmp19 tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp19 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp18 - tmp19 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tl.store(out_ptr0 + x0, tmp19, xmask) tl.store(out_ptr1 + x0, tmp30, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y3, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x2 + 16 * y1), xmask & ymask, eviction_policy ='evict_last') tmp8 = tl.load(in_ptr3 + (x2 + 16 * y1), xmask & ymask, eviction_policy ='evict_last') tmp2 = -10000.0 tmp3 = tmp1 * tmp2 
tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tl.store(out_ptr0 + (x2 + 16 * y3), tmp9, xmask & ymask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), primals_3, stride=(1, 1), padding= (0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0 ), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 1), (16, 4, 1, 1)) buf1 = empty_strided_cuda((4, 16, 4), (64, 1, 16), torch.float32) buf7 = empty_strided_cuda((4, 4, 16), (64, 1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_clone_transpose_0[grid(16, 16)](primals_1, buf1, buf7, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_1 buf2 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) extern_kernels.bmm(buf1, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf2) buf3 = empty_strided_cuda((64, 1), (1, 64), torch.float32) buf4 = empty_strided_cuda((64, 1), (1, 64), torch.float32) triton_poi_fused__softmax_add_mul_1[grid(64)](buf2, primals_4, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0) del buf1 triton_poi_fused_clone_2[grid(16, 16)](buf2, primals_4, buf3, buf4, buf5, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del buf3 del buf4 buf6 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), buf5, out=buf6) return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_3, primals_4, reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), buf2, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf5, (4, 16, 4), (64, 1, 16), 0), buf7 class SpatialAttentionNew(nn.Module): def __init__(self, input_dim, context_dim): super().__init__() self.conv_context = nn.Conv2d(context_dim, input_dim, 1, stride=1, padding=0, bias=False) self.sm = nn.Softmax(dim=1) def forward(self, input_0, input_1, input_2): primals_3 = self.conv_context.weight primals_1 = input_0 primals_2 = input_1 primals_4 = input_2 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0], output[1]
dariopavllo/textured-3d-gan
SpatialAttention
false
15,122
[ "MIT" ]
77
d419cee94c5913a900e08b15c0438eb2c89ce4d4
https://github.com/dariopavllo/textured-3d-gan/tree/d419cee94c5913a900e08b15c0438eb2c89ce4d4
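Since call() in the compiled SpatialAttentionNew asserts the exact strides it was traced with ((4, 4, 4, 4) input, (4, 4, 4) context, (4, 4) mask), any sanity check must reuse the get_inputs() shapes. A minimal parity sketch, added for illustration and assuming a CUDA device (the tolerance is an arbitrary choice):

import torch

def check_spatial_attention():
    torch.manual_seed(0)
    eager = SpatialAttention(input_dim=4, context_dim=4).cuda()
    compiled = SpatialAttentionNew(input_dim=4, context_dim=4).cuda()
    compiled.load_state_dict(eager.state_dict())  # share the 1x1 conv weight
    inp = torch.rand(4, 4, 4, 4, device='cuda')
    ctx = torch.rand(4, 4, 4, device='cuda')
    mask = (torch.rand(4, 4, device='cuda') > 0.5).float()  # 1 = suppress slot
    out_e, attn_e = eager(inp, ctx, mask)
    out_c, attn_c = compiled(inp, ctx, mask)
    assert torch.allclose(out_e, out_c, atol=1e-5)
    assert torch.allclose(attn_e, attn_c, atol=1e-5)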
AsymmetricLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/3o/c3oppgv2z6rui2adtrud2og25lthom52d25txessl4yzaf7uj73z.py # Topologically Sorted Source Nodes: [x_sigmoid, clamp_1, log, los_pos, sub_1, xs_neg, add, xs_neg_1, clamp_2, log_1, los_neg, loss, pt0, sub_2, pt1, pt, sub_4, mul_4, sub_3, mul_5, one_sided_gamma, one_sided_w, loss_1, sum_1, neg], Original ATen: [aten.sigmoid, aten.clamp, aten.log, aten.mul, aten.rsub, aten.add, aten.pow, aten.sum, aten.neg] # Source node to ATen node mapping: # add => add # clamp_1 => clamp_max_1, clamp_min # clamp_2 => clamp_max_2, clamp_min_1 # log => log # log_1 => log_1 # los_neg => mul_1 # los_pos => mul # loss => add_1 # loss_1 => mul_6 # mul_4 => mul_4 # mul_5 => mul_5 # neg => neg # one_sided_gamma => add_3 # one_sided_w => pow_1 # pt => add_2 # pt0 => mul_2 # pt1 => mul_3 # sub_1 => sub_1 # sub_2 => sub_2 # sub_3 => sub_3 # sub_4 => sub_4 # sum_1 => sum_1 # x_sigmoid => sigmoid # xs_neg => sub # xs_neg_1 => clamp_max # Graph fragment: # %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sigmoid, 1e-08), kwargs = {}) # %clamp_max_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 0.99999999), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_max_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %log), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 0.05), kwargs = {}) # %clamp_max : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add, 1), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%clamp_max, 1e-08), kwargs = {}) # %clamp_max_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 0.99999999), kwargs = 
{}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_max_2,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %log_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %arg1_1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max, %sub_2), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %add_2), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, 4), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_5), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%sub_4, %add_3), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %pow_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_6,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_1,), kwargs = {}) triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sum_0 = async_compile.triton('triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): 
xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tl.sigmoid(tmp1) tmp3 = 1e-08 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 0.99999999 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tl_math.log(tmp6) tmp8 = tmp0 * tmp7 tmp9 = 1.0 tmp10 = tmp9 - tmp0 tmp11 = tmp9 - tmp2 tmp12 = 0.05 tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.minimum(tmp13, tmp9) tmp15 = triton_helpers.maximum(tmp14, tmp3) tmp16 = triton_helpers.minimum(tmp15, tmp5) tmp17 = tl_math.log(tmp16) tmp18 = tmp10 * tmp17 tmp19 = tmp8 + tmp18 tmp20 = tmp2 * tmp0 tmp21 = tmp14 * tmp10 tmp22 = tmp20 + tmp21 tmp23 = tmp9 - tmp22 tmp24 = tmp0 * tmp9 tmp25 = 4.0 tmp26 = tmp10 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = libdevice.pow(tmp23, tmp27) tmp29 = tmp19 * tmp28 tmp30 = tl.broadcast_to(tmp29, [RBLOCK]) tmp32 = triton_helpers.promote_to_tensor(tl.sum(tmp30, 0)) tmp33 = -tmp32 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp33, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_sigmoid, clamp_1, log, los_pos, sub_1, xs_neg, add, xs_neg_1, clamp_2, log_1, los_neg, loss, pt0, sub_2, pt1, pt, sub_4, mul_4, sub_3, mul_5, one_sided_gamma, one_sided_w, loss_1, sum_1, neg], Original ATen: [aten.sigmoid, aten.clamp, aten.log, aten.mul, aten.rsub, aten.add, aten.pow, aten.sum, aten.neg] stream0 = get_raw_stream(0) triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sum_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed


class AsymmetricLoss(nn.Module):

    def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-08,
            disable_torch_grad_focal_loss=False):
        super(AsymmetricLoss, self).__init__()
        self.gamma_neg = gamma_neg
        self.gamma_pos = gamma_pos
        self.clip = clip
        self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
        self.eps = eps

    def forward(self, x, y):
        """
        Parameters
        ----------
        x: input logits
        y: targets (multi-label binarized vector)
        """
        x_sigmoid = torch.sigmoid(x)
        xs_pos = x_sigmoid
        xs_neg = 1 - x_sigmoid
        if self.clip is not None and self.clip > 0:
            xs_neg = (xs_neg + self.clip).clamp(max=1)
        los_pos = y * torch.log(xs_pos.clamp(min=self.eps, max=1 - self.eps))
        los_neg = (1 - y) * torch.log(xs_neg.clamp(min=self.eps, max=1 -
            self.eps))
        loss = los_pos + los_neg
        if self.gamma_neg > 0 or self.gamma_pos > 0:
            if self.disable_torch_grad_focal_loss:
                torch._C.set_grad_enabled(False)
            pt0 = xs_pos * y
            pt1 = xs_neg * (1 - y)
            pt = pt0 + pt1
            one_sided_gamma = self.gamma_pos * y + self.gamma_neg * (1 - y)
            one_sided_w = torch.pow(1 - pt, one_sided_gamma)
            if self.disable_torch_grad_focal_loss:
                torch._C.set_grad_enabled(True)
            loss *= one_sided_w
        return -loss.sum()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tl.sigmoid(tmp1) tmp3 = 1e-08 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 0.99999999 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tl_math.log(tmp6) tmp8 = tmp0 * tmp7 tmp9 = 1.0 tmp10 = tmp9 - tmp0 tmp11 = tmp9 - tmp2 tmp12 = 0.05 tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.minimum(tmp13, tmp9) tmp15 = triton_helpers.maximum(tmp14, tmp3) tmp16 = triton_helpers.minimum(tmp15, tmp5) tmp17 = tl_math.log(tmp16) tmp18 = tmp10 * tmp17 tmp19 = tmp8 + tmp18 tmp20 = tmp2 * tmp0 tmp21 = tmp14 * tmp10 tmp22 = tmp20 + tmp21 tmp23 = tmp9 - tmp22 tmp24 = tmp0 * tmp9 tmp25 = 4.0 tmp26 = tmp10 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = libdevice.pow(tmp23, tmp27) tmp29 = tmp19 * tmp28 tmp30 = tl.broadcast_to(tmp29, [RBLOCK]) tmp32 = triton_helpers.promote_to_tensor(tl.sum(tmp30, 0)) tmp33 = -tmp32 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp33, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sum_0[grid(1)]( buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class AsymmetricLossNew(nn.Module): def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-08, disable_torch_grad_focal_loss=False): super(AsymmetricLossNew, self).__init__() self.gamma_neg = gamma_neg self.gamma_pos = gamma_pos self.clip = clip self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss self.eps = eps def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
davidaderup/query2labels
AsymmetricLoss
false
15,123
[ "MIT" ]
164
5a10c861dda85d94ba01ec6ad4119eef67a9f441
https://github.com/davidaderup/query2labels/tree/5a10c861dda85d94ba01ec6ad4119eef67a9f441
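For readers decoding the fused reduction: it evaluates the asymmetric focal loss elementwise and sums it, i.e. -(ce * w).sum() with focal weight w = (1 - pt)^(gamma_pos*y + gamma_neg*(1-y)). A plain-PyTorch restatement under the default hyper-parameters, added here as a sketch (the kernel's 0.99999999 clamp is exactly 1 - eps for eps=1e-08):

import torch

def reference_asl(x, y, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-08):
    p = torch.sigmoid(x)
    p_neg = (1 - p + clip).clamp(max=1)          # shifted negative probability
    ce = y * torch.log(p.clamp(min=eps, max=1 - eps)) + (1 - y) * torch.log(
        p_neg.clamp(min=eps, max=1 - eps))
    pt = p * y + p_neg * (1 - y)                 # probability of the true side
    w = (1 - pt) ** (gamma_pos * y + gamma_neg * (1 - y))
    return -(ce * w).sum()

For float32 inputs this agrees with AsymmetricLoss()(x, y) up to reduction-order rounding.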
AsymmetricLossOptimized
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/mg/cmg5ja3sgkvvdxfwd5camevipmbnfq5nmbb2zt4mwgvk3wq4fvnt.py # Topologically Sorted Source Nodes: [sigmoid, clamp, log, mul, sub, sub_1, add_, clamp_, clamp_1, log_1, mul_1, add__1, mul_2, sub_2, mul_3, sub_3, mul_4, mul_5, add, pow_1, imul, sum_1, neg, _loss, truediv_1, _loss_1], Original ATen: [aten.sigmoid, aten.clamp, aten.log, aten.mul, aten.rsub, aten.add, aten.sub, aten.pow, aten.sum, aten.neg, aten.div] # Source node to ATen node mapping: # _loss => div # _loss_1 => mul_7 # add => add_2 # add_ => add # add__1 => add_1 # clamp => clamp_min # clamp_ => clamp_max # clamp_1 => clamp_min_1 # imul => mul_6 # log => log # log_1 => log_1 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # neg => neg # pow_1 => pow_1 # sigmoid => sigmoid # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # sub_3 => sub_3 # sum_1 => sum_1 # truediv_1 => div_1 # Graph fragment: # %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg1_1,), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sigmoid, 1e-05), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_min,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %log), kwargs = {}) # %sub : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, 0.05), kwargs = {}) # %clamp_max : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add, 1), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%clamp_max, 1e-05), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_min_1,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %log_1), kwargs = {}) # %add_1 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %arg0_1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul_2), kwargs = {}) # %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max, %sub), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %mul_3), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 4), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_5), kwargs = {}) # %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%sub_3, %add_2), kwargs = {}) # %mul_6 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %pow_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_6,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, 4), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div, 4), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, 1000), kwargs = {}) triton_per_fused_add_clamp_div_log_mul_neg_pow_rsub_sigmoid_sub_sum_0 = async_compile.triton('triton_per_fused_add_clamp_div_log_mul_neg_pow_rsub_sigmoid_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {8: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 9), equal_to_1=(8,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_div_log_mul_neg_pow_rsub_sigmoid_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_clamp_div_log_mul_neg_pow_rsub_sigmoid_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel): 
xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tl.sigmoid(tmp3) tmp5 = tmp4 * tmp0 tmp6 = tmp1 - tmp4 tmp7 = 0.05 tmp8 = tmp6 + tmp7 tmp9 = triton_helpers.minimum(tmp8, tmp1) tmp10 = tmp9 * tmp2 tmp11 = tmp1 - tmp5 tmp12 = tmp11 - tmp10 tmp13 = tmp0 * tmp1 tmp14 = 4.0 tmp15 = tmp2 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = libdevice.pow(tmp12, tmp16) tmp18 = 1e-05 tmp19 = triton_helpers.maximum(tmp4, tmp18) tmp20 = tl_math.log(tmp19) tmp21 = tmp0 * tmp20 tmp22 = triton_helpers.maximum(tmp9, tmp18) tmp23 = tl_math.log(tmp22) tmp24 = tmp2 * tmp23 tmp25 = tmp21 + tmp24 tmp26 = tmp25 * tmp17 tmp27 = tl.broadcast_to(tmp26, [RBLOCK]) tmp29 = triton_helpers.promote_to_tensor(tl.sum(tmp27, 0)) tmp30 = -tmp29 tmp31 = 0.25 tmp32 = tmp30 * tmp31 tmp33 = tmp32 * tmp31 tmp34 = 1000.0 tmp35 = tmp33 * tmp34 tl.store(out_ptr0 + (tl.broadcast_to(r0, [RBLOCK])), tmp2, None) tl.store(out_ptr1 + (tl.broadcast_to(r0, [RBLOCK])), tmp5, None) tl.store(out_ptr2 + (tl.broadcast_to(r0, [RBLOCK])), tmp10, None) tl.store(out_ptr3 + (tl.broadcast_to(r0, [RBLOCK])), tmp17, None) tl.store(out_ptr4 + (tl.broadcast_to(r0, [RBLOCK])), tmp26, None) tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp35, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = empty_strided_cuda((), (), torch.float32) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [sigmoid, clamp, log, mul, sub, sub_1, add_, clamp_, clamp_1, log_1, mul_1, add__1, mul_2, sub_2, mul_3, sub_3, mul_4, mul_5, add, pow_1, imul, sum_1, neg, _loss, truediv_1, _loss_1], Original ATen: [aten.sigmoid, aten.clamp, aten.log, aten.mul, aten.rsub, aten.add, aten.sub, aten.pow, aten.sum, aten.neg, aten.div] stream0 = get_raw_stream(0) triton_per_fused_add_clamp_div_log_mul_neg_pow_rsub_sigmoid_sub_sum_0.run(buf6, arg0_1, arg1_1, buf0, buf1, buf2, buf3, buf4, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf6, buf3, buf4, buf2, buf1, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed


class AsymmetricLossOptimized(nn.Module):
    """ Notice - optimized version, minimizes memory allocation and gpu
    uploading, favors inplace operations"""

    def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-05,
            disable_torch_grad_focal_loss=False):
        super(AsymmetricLossOptimized, self).__init__()
        self.gamma_neg = gamma_neg
        self.gamma_pos = gamma_pos
        self.clip = clip
        self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
        self.eps = eps
        self.targets = self.anti_targets = self.xs_pos = self.xs_neg = \
            self.asymmetric_w = self.loss = None

    def forward(self, x, y):
        """
        Parameters
        ----------
        x: input logits
        y: targets (multi-label binarized vector)
        """
        self.targets = y
        self.anti_targets = 1 - y
        self.xs_pos = torch.sigmoid(x)
        self.xs_neg = 1.0 - self.xs_pos
        if self.clip is not None and self.clip > 0:
            self.xs_neg.add_(self.clip).clamp_(max=1)
        self.loss = self.targets * torch.log(self.xs_pos.clamp(min=self.eps))
        self.loss.add_(self.anti_targets * torch.log(self.xs_neg.clamp(min=
            self.eps)))
        if self.gamma_neg > 0 or self.gamma_pos > 0:
            if self.disable_torch_grad_focal_loss:
                with torch.no_grad():
                    self.xs_pos = self.xs_pos * self.targets
                    self.xs_neg = self.xs_neg * self.anti_targets
                    self.asymmetric_w = torch.pow(1 - self.xs_pos - self.
                        xs_neg, self.gamma_pos * self.targets + self.
                        gamma_neg * self.anti_targets)
                self.loss *= self.asymmetric_w
            else:
                self.xs_pos = self.xs_pos * self.targets
                self.xs_neg = self.xs_neg * self.anti_targets
                self.asymmetric_w = torch.pow(1 - self.xs_pos - self.xs_neg,
                    self.gamma_pos * self.targets + self.gamma_neg * self.
                    anti_targets)
                self.loss *= self.asymmetric_w
        _loss = -self.loss.sum() / x.size(0)
        _loss = _loss / y.size(1) * 1000
        return _loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_clamp_div_log_mul_neg_pow_rsub_sigmoid_sub_sum_0( in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tl.sigmoid(tmp3) tmp5 = tmp4 * tmp0 tmp6 = tmp1 - tmp4 tmp7 = 0.05 tmp8 = tmp6 + tmp7 tmp9 = triton_helpers.minimum(tmp8, tmp1) tmp10 = tmp9 * tmp2 tmp11 = tmp1 - tmp5 tmp12 = tmp11 - tmp10 tmp13 = tmp0 * tmp1 tmp14 = 4.0 tmp15 = tmp2 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = libdevice.pow(tmp12, tmp16) tmp18 = 1e-05 tmp19 = triton_helpers.maximum(tmp4, tmp18) tmp20 = tl_math.log(tmp19) tmp21 = tmp0 * tmp20 tmp22 = triton_helpers.maximum(tmp9, tmp18) tmp23 = tl_math.log(tmp22) tmp24 = tmp2 * tmp23 tmp25 = tmp21 + tmp24 tmp26 = tmp25 * tmp17 tmp27 = tl.broadcast_to(tmp26, [RBLOCK]) tmp29 = triton_helpers.promote_to_tensor(tl.sum(tmp27, 0)) tmp30 = -tmp29 tmp31 = 0.25 tmp32 = tmp30 * tmp31 tmp33 = tmp32 * tmp31 tmp34 = 1000.0 tmp35 = tmp33 * tmp34 tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp2, None) tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp5, None) tl.store(out_ptr2 + tl.broadcast_to(r0, [RBLOCK]), tmp10, None) tl.store(out_ptr3 + tl.broadcast_to(r0, [RBLOCK]), tmp17, None) tl.store(out_ptr4 + tl.broadcast_to(r0, [RBLOCK]), tmp26, None) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp35, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = empty_strided_cuda((), (), torch.float32) buf6 = buf5 del buf5 get_raw_stream(0) triton_per_fused_add_clamp_div_log_mul_neg_pow_rsub_sigmoid_sub_sum_0[ grid(1)](buf6, arg0_1, arg1_1, buf0, buf1, buf2, buf3, buf4, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf6, buf3, buf4, buf2, buf1, buf0 class AsymmetricLossOptimizedNew(nn.Module): """ Notice - optimized version, minimizes memory allocation and gpu uploading, favors inplace operations""" def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-05, disable_torch_grad_focal_loss=False): super(AsymmetricLossOptimizedNew, self).__init__() self.gamma_neg = gamma_neg self.gamma_pos = gamma_pos self.clip = clip self.disable_torch_grad_focal_loss = 
disable_torch_grad_focal_loss self.eps = eps (self.targets) = (self.anti_targets) = (self.xs_pos) = (self.xs_neg ) = (self.asymmetric_w) = (self.loss) = None def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
davidaderup/query2labels
AsymmetricLossOptimized
false
15,124
[ "MIT" ]
164
5a10c861dda85d94ba01ec6ad4119eef67a9f441
https://github.com/davidaderup/query2labels/tree/5a10c861dda85d94ba01ec6ad4119eef67a9f441
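One folding detail in this row: the tail `-self.loss.sum() / x.size(0)` followed by `/ y.size(1) * 1000` was traced with x.size(0) == y.size(1) == 4, so Inductor turns both divisions into multiplies by 0.25 (tmp31) before the final scale by 1000.0 (tmp34); other shapes would force a retrace. A hedged smoke test at the traced shape (assumes a CUDA device; the tolerance is arbitrary):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
y = (torch.rand(4, 4, 4, 4, device='cuda') > 0.5).float()
eager = AsymmetricLossOptimized()(x, y)
compiled = AsymmetricLossOptimizedNew()(x, y)
assert torch.allclose(eager, compiled, atol=1e-4)  # call() asserts (4,4,4,4) strides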
FHardtanh
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/y4/cy4gn2hznhvh4fqxxinbdqwsdnacyr5awtzvilkq3jmjo2qi7mch.py # Topologically Sorted Source Nodes: [hardtanh], Original ATen: [aten.hardtanh] # Source node to ATen node mapping: # hardtanh => clamp_max, clamp_min # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 0.5035233756184968), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1.0470195501642494), kwargs = {}) triton_poi_fused_hardtanh_0 = async_compile.triton('triton_poi_fused_hardtanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_hardtanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) 
tmp1 = 0.5035233756184968 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 1.0470195501642494 tmp4 = triton_helpers.minimum(tmp2, tmp3) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [hardtanh], Original ATen: [aten.hardtanh] stream0 = get_raw_stream(0) triton_poi_fused_hardtanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import random
import torch
import torch.nn as nn


class FHardtanh(nn.Module):
    """ Test for nn.functional types """

    def __init__(self):
        super(FHardtanh, self).__init__()
        self.min_val = random.random()
        self.max_val = self.min_val + random.random()

    def forward(self, x):
        from torch.nn import functional as F
        return F.hardtanh(x, min_val=self.min_val, max_val=self.max_val)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import random import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5035233756184968 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 1.0470195501642494 tmp4 = triton_helpers.minimum(tmp2, tmp3) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_hardtanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 return buf0, class FHardtanhNew(nn.Module): """ Test for nn.functional types """ def __init__(self): super(FHardtanhNew, self).__init__() self.min_val = random.random() self.max_val = self.min_val + random.random() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
dawnclaude/onnx2keras
FHardtanh
false
15,125
[ "MIT" ]
115
3d2a47c0a228b91fd434232274e216e491da36e3
https://github.com/dawnclaude/onnx2keras/tree/3d2a47c0a228b91fd434232274e216e491da36e3
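A quirk worth flagging in this row: FHardtanh draws min_val/max_val at construction time, and the values sampled during tracing (0.5035233756184968 and 1.0470195501642494) are baked into the kernel as literals. FHardtanhNew re-samples its own self.min_val/self.max_val in __init__, but call() never reads them, so the compiled module always clamps to the traced constants. A short sketch demonstrating this, assuming a CUDA device:

import torch
from torch.nn import functional as F

x = torch.rand(4, 4, 4, 4, device='cuda')
ref = F.hardtanh(x, min_val=0.5035233756184968, max_val=1.0470195501642494)
out = FHardtanhNew()(x)  # its freshly sampled bounds are ignored by call()
assert torch.allclose(ref, out)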
OutputSP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/gz/cgz4gcc4lup72f7lzpqk3fmhd65egrrglzmsooxqpdq5ba3yf4dp.py # Topologically Sorted Source Nodes: [feats], Original ATen: [aten.stack] # Source node to ATen node mapping: # feats => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2, %primals_3], 1), kwargs = {}) triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = (xindex // 12) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 
tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7b/c7bo3z664mcsir4dea5p6ri73phknbfrm6ah6ij7xfeuhu2js5fh.py # Topologically Sorted Source Nodes: [dot, tanh], Original ATen: [aten.add, aten.tanh] # Source node to ATen node mapping: # dot => add # tanh => tanh # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, %view_2), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) triton_poi_fused_add_tanh_1 = async_compile.triton('triton_poi_fused_add_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = (xindex // 12) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr0 + (x3), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = libdevice.tanh(tmp6) tl.store(in_out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/4f/c4fuixnnivk5vplz62akwht7ocumcl4z3zqvkgln7uudng4qpxzu.py # Topologically Sorted Source Nodes: [weight], Original ATen: [aten._softmax] # Source node to ATen node mapping: # weight => amax, div, exp, sub, sum_1 # 
Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 3) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (3*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (3*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (3*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp0 - tmp5 tmp7 = tl_math.exp(tmp6) tmp8 = tmp1 - tmp5 tmp9 = tl_math.exp(tmp8) tmp10 = tmp2 - tmp5 tmp11 = tl_math.exp(tmp10) tmp12 = tmp9 + tmp11 tmp13 = tmp4 - tmp5 tmp14 = tl_math.exp(tmp13) tmp15 = tmp12 + tmp14 tmp16 = tmp7 / tmp15 tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (1, 4), (4, 1)) 
assert_size_stride(primals_10, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [feats], Original ATen: [aten.stack] stream0 = get_raw_stream(0) triton_poi_fused_stack_0.run(primals_1, primals_2, primals_3, buf0, 48, grid=grid(48), stream=stream0) del primals_1 del primals_2 del primals_3 buf1 = empty_strided_cuda((12, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (12, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_8, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = reinterpret_tensor(buf1, (4, 3, 4), (12, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [dot, tanh], Original ATen: [aten.add, aten.tanh] triton_poi_fused_add_tanh_1.run(buf3, buf2, primals_7, primals_5, 48, grid=grid(48), stream=stream0) del primals_5 del primals_7 buf5 = empty_strided_cuda((12, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf3, (12, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_10 buf6 = empty_strided_cuda((4, 3), (3, 1), torch.float32) # Topologically Sorted Source Nodes: [weight], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 12, grid=grid(12), stream=stream0) buf7 = reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf6, (4, 1, 3), (3, 0, 1), 0), reinterpret_tensor(buf0, (4, 3, 4), (12, 4, 1), 0), out=buf7) del buf6 return (reinterpret_tensor(buf7, (4, 4), (4, 1), 0), primals_8, reinterpret_tensor(buf0, (4, 3, 4), (12, 4, 1), 0), buf3, buf5, primals_9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn from torch.autograd import * import torch.nn.functional as F class OutputSP(nn.Module): def __init__(self, opt): super(OutputSP, self).__init__() self.rnn_size = opt.rnn_size self.att_hid_size = opt.att_hid_size self.wv = nn.Linear(self.rnn_size, self.att_hid_size) self.wh = nn.Linear(self.rnn_size, self.att_hid_size) self.wa = nn.Linear(self.att_hid_size, 1) def forward(self, h, single_feat, comp_feat, fc_feats): feats = torch.stack([single_feat, comp_feat, fc_feats], dim=1) feats_ = self.wv(feats) dot = self.wh(h).unsqueeze(1).expand_as(feats_) + feats_ weight = F.softmax(self.wa(torch.tanh(dot)).squeeze(2), dim=1) output_feat = torch.bmm(weight.unsqueeze(1), feats).squeeze(1) return output_feat def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'opt': _mock_config(rnn_size=4, att_hid_size=4)}]
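For readers who want to try the eager module directly, here is a minimal usage sketch (types.SimpleNamespace stands in for the repo's opt / _mock_config, and the OutputSP class above is assumed to be in scope):

import torch
from types import SimpleNamespace

opt = SimpleNamespace(rnn_size=4, att_hid_size=4)
attn = OutputSP(opt)

h, single_feat, comp_feat, fc_feats = (torch.rand(4, 4) for _ in range(4))
out = attn(h, single_feat, comp_feat, fc_feats)
print(out.shape)  # torch.Size([4, 4]): one attended feature vector per batch row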
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn from torch.autograd import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_stack_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 12 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr0 + x3, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = libdevice.tanh(tmp6) tl.store(in_out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 3 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 3 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 3 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 3 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp0 - tmp5 tmp7 = tl_math.exp(tmp6) tmp8 = tmp1 - tmp5 tmp9 = tl_math.exp(tmp8) tmp10 = tmp2 - tmp5 tmp11 = tl_math.exp(tmp10) tmp12 = tmp9 + tmp11 tmp13 = tmp4 - tmp5 tmp14 = tl_math.exp(tmp13) tmp15 = tmp12 + tmp14 tmp16 = tmp7 / tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) 
assert_size_stride(primals_9, (1, 4), (4, 1)) assert_size_stride(primals_10, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) get_raw_stream(0) triton_poi_fused_stack_0[grid(48)](primals_1, primals_2, primals_3, buf0, 48, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 del primals_3 buf1 = empty_strided_cuda((12, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (12, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_8, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = reinterpret_tensor(buf1, (4, 3, 4), (12, 4, 1), 0) del buf1 triton_poi_fused_add_tanh_1[grid(48)](buf3, buf2, primals_7, primals_5, 48, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 del primals_7 buf5 = empty_strided_cuda((12, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_10, reinterpret_tensor(buf3, (12, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_10 buf6 = empty_strided_cuda((4, 3), (3, 1), torch.float32) triton_poi_fused__softmax_2[grid(12)](buf5, buf6, 12, XBLOCK=16, num_warps=1, num_stages=1) buf7 = reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf6, (4, 1, 3), (3, 0, 1), 0 ), reinterpret_tensor(buf0, (4, 3, 4), (12, 4, 1), 0), out=buf7) del buf6 return reinterpret_tensor(buf7, (4, 4), (4, 1), 0 ), primals_8, reinterpret_tensor(buf0, (4, 3, 4), (12, 4, 1), 0 ), buf3, buf5, primals_9 class OutputSPNew(nn.Module): def __init__(self, opt): super(OutputSPNew, self).__init__() self.rnn_size = opt.rnn_size self.att_hid_size = opt.att_hid_size self.wv = nn.Linear(self.rnn_size, self.att_hid_size) self.wh = nn.Linear(self.rnn_size, self.att_hid_size) self.wa = nn.Linear(self.att_hid_size, 1) def forward(self, input_0, input_1, input_2, input_3): primals_1 = self.wv.weight primals_5 = self.wv.bias primals_2 = self.wh.weight primals_7 = self.wh.bias primals_9 = self.wa.weight primals_10 = self.wa.bias primals_3 = input_0 primals_4 = input_1 primals_6 = input_2 primals_8 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
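One detail worth calling out in the compiled wrapper: reinterpret_tensor(buf6, (4, 1, 3), (3, 0, 1), 0) turns the (4, 3) softmax weights into a batch of 1x3 row vectors without copying, so extern_kernels.bmm can consume them directly. The same zero-stride view in plain PyTorch (a sketch, not part of the generated code):

import torch

weight = torch.rand(4, 3)    # softmax attention weights, one row per sample
feats = torch.rand(4, 3, 4)  # the stacked features

w_view = weight.as_strided((4, 1, 3), (3, 0, 1))  # no-copy view, mirrors reinterpret_tensor
out = torch.bmm(w_view, feats).squeeze(1)         # (4, 4) attended features
assert torch.allclose(out, torch.bmm(weight.unsqueeze(1), feats).squeeze(1))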
daqingliu/CAVP
OutputSP
false
15,126
[ "MIT" ]
49
d383affde78dbc75e369095c27954dcdd79478d0
https://github.com/daqingliu/CAVP/tree/d383affde78dbc75e369095c27954dcdd79478d0
FClipTest
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/cj/ccjkqdh6joaeebw6q6ggmziytclal35hw7nzz7xfz4flvsyy6z4v.py # Topologically Sorted Source Nodes: [clamp], Original ATen: [aten.clamp] # Source node to ATen node mapping: # clamp => clamp_max, clamp_min # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 0.6920666406817362), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1.044775393513254), kwargs = {}) triton_poi_fused_clamp_0 = async_compile.triton('triton_poi_fused_clamp_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 
0.6920666406817362 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 1.044775393513254 tmp4 = triton_helpers.minimum(tmp2, tmp3) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [clamp], Original ATen: [aten.clamp] stream0 = get_raw_stream(0) triton_poi_fused_clamp_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
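The two float literals in triton_poi_fused_clamp_0 are not magic numbers from the model definition: they are the randomly drawn self.low / self.high of the particular FClipTest instance that was traced (see the source below), baked into the kernel as compile-time constants. A plain-PyTorch rendering of exactly what the kernel computes:

import torch

def clamp_ref(x: torch.Tensor) -> torch.Tensor:
    # maximum-then-minimum, matching the kernel's tmp2/tmp4 sequence
    lo, hi = 0.6920666406817362, 1.044775393513254
    return torch.minimum(torch.maximum(x, torch.tensor(lo)), torch.tensor(hi))

x = torch.rand(4, 4, 4, 4)
assert torch.allclose(clamp_ref(x), x.clamp(0.6920666406817362, 1.044775393513254))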
import torch import numpy as np import torch.nn as nn class FClipTest(nn.Module): """ Test for nn.functional types """ def __init__(self): self.low = np.random.uniform(-1, 1) self.high = np.random.uniform(1, 2) super(FClipTest, self).__init__() def forward(self, x): return x.clamp(self.low, self.high) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
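Because the bounds are sampled in __init__, each FClipTest instance clamps to its own private range, so the compiled kernel above is only valid for the instance it was traced from. A quick sketch (assumes the class above is in scope):

import torch

a, b = FClipTest(), FClipTest()
x = torch.rand(4, 4, 4, 4)
print(a.low, a.high, b.low, b.high)  # independent draws, so the two modules clamp differently
print(torch.equal(a(x), b(x)))       # False whenever the differing bounds actually bite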
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.6920666406817362 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 1.044775393513254 tmp4 = triton_helpers.minimum(tmp2, tmp3) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class FClipTestNew(nn.Module): """ Test for nn.functional types """ def __init__(self): self.low = np.random.uniform(-1, 1) self.high = np.random.uniform(1, 2) super(FClipTestNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
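The compiled path can also be exercised through call directly (CUDA-only; a sketch that assumes the wrapper above is importable). Note that call empties the argument list it is handed:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
out, = call([x])  # call() clears the list, so build it inline
assert torch.allclose(out, x.clamp(0.6920666406817362, 1.044775393513254))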
dawnclaude/onnx2keras
FClipTest
false
15,127
[ "MIT" ]
115
3d2a47c0a228b91fd434232274e216e491da36e3
https://github.com/dawnclaude/onnx2keras/tree/3d2a47c0a228b91fd434232274e216e491da36e3
EALSTM
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/pc/cpcr3623cktnc2724ttepfbk25u5ng32io6u3pxmpjwbdpi5psaw.py # Topologically Sorted Source Nodes: [h_0], Original ATen: [aten.zero] # Source node to ATen node mapping: # h_0 => full # Graph fragment: # %full : [num_users=3] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_zero_0 = async_compile.triton('triton_poi_fused_zero_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_zero_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_zero_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/g4/cg4o5gjny3k72i2nzpwyrk5uosksnmkgjljkkkxdjlxtab5kkwfc.py # Topologically 
Sorted Source Nodes: [i, sigmoid_1, mul, tanh, mul_1, c_1, sigmoid_2, tanh_1, h_1], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add, aten.sigmoid_backward] # Source node to ATen node mapping: # c_1 => add_1 # h_1 => mul_2 # i => sigmoid # mul => mul # mul_1 => mul_1 # sigmoid_1 => sigmoid_1 # sigmoid_2 => sigmoid_2 # tanh => tanh # tanh_1 => tanh_1 # Graph fragment: # %sigmoid : [num_users=4] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm,), kwargs = {}) # %sigmoid_1 : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %full), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_2,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {}) # %add_1 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %sigmoid_2 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_1,), kwargs = {}) # %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {}) # %mul_2 : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_2, %tanh_1), kwargs = {}) # %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_1), kwargs = {}) # %mul_65 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %sub_15), kwargs = {}) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (8 + x0 + (12*x1)), xmask) tmp1 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (8 + x0 + (12*x1)), xmask) tmp6 = tl.load(in_ptr0 + (x0 + (12*x1)), xmask) tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (x0 + (12*x1)), xmask) tmp14 = tl.load(in_ptr3 + (x2), xmask) tmp21 = tl.load(in_ptr0 + (4 + x0 + (12*x1)), xmask) tmp22 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (4 + x0 + (12*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = libdevice.tanh(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = tl.sigmoid(tmp10) tmp12 = 0.0 tmp13 = tmp11 * tmp12 tmp15 = tl.sigmoid(tmp14) tmp16 = tmp15 * tmp5 tmp17 = tmp13 + tmp16 tmp18 = 1.0 tmp19 = tmp18 - tmp11 tmp20 = tmp11 * tmp19 tmp23 = tmp21 + tmp22 tmp25 = tmp23 + tmp24 tmp26 = tl.sigmoid(tmp25) tmp27 = libdevice.tanh(tmp17) tmp28 = tmp26 * tmp27 tl.store(out_ptr0 + (x2), tmp5, xmask) tl.store(out_ptr1 + (x2), tmp17, xmask) tl.store(out_ptr2 + (x2), tmp20, xmask) tl.store(out_ptr3 + (x2), tmp26, xmask) tl.store(out_ptr4 + (x2), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/p4/cp4wygqdedsof2jdfj6rmpsnu4jdgqwk3nlpiazkevshiyhw2hwa.py # Topologically Sorted Source Nodes: [i, sigmoid_3, mul_3, tanh_2, mul_4, c_2, sigmoid_4, tanh_3, h_2], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add] # Source node to ATen node mapping: # c_2 => add_3 # h_2 => mul_5 # i => sigmoid # mul_3 => mul_3 # mul_4 => mul_4 # sigmoid_3 => sigmoid_3 # sigmoid_4 => sigmoid_4 # tanh_2 => tanh_2 # tanh_3 => tanh_3 # Graph fragment: # %sigmoid : [num_users=4] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm,), kwargs = {}) # %sigmoid_3 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_3,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_3, %add_1), kwargs = {}) # %tanh_2 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_5,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh_2), kwargs = {}) # %add_3 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_4), kwargs = {}) # %sigmoid_4 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_4,), kwargs = {}) # %tanh_3 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_3,), kwargs = {}) # %mul_5 : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_4, %tanh_3), kwargs = {}) triton_poi_fused_add_mul_sigmoid_tanh_2 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_tanh_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', 
index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (12*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (12*x1)), xmask) tmp6 = tl.load(in_ptr0 + (8 + x0 + (12*x1)), xmask) tmp7 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (8 + x0 + (12*x1)), xmask) tmp12 = tl.load(in_ptr3 + (x2), xmask) tmp14 = tl.load(in_ptr4 + (x2), xmask) tmp18 = tl.load(in_ptr0 + (4 + x0 + (12*x1)), xmask) tmp19 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr2 + (4 + x0 + (12*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = libdevice.tanh(tmp10) tmp13 = tmp5 * tmp12 tmp15 = tl.sigmoid(tmp14) tmp16 = tmp15 * tmp11 tmp17 = tmp13 + tmp16 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp23 = tl.sigmoid(tmp22) tmp24 = libdevice.tanh(tmp17) tmp25 = tmp23 * tmp24 tl.store(out_ptr0 + (x2), tmp5, xmask) tl.store(out_ptr1 + (x2), tmp11, xmask) tl.store(out_ptr2 + (x2), tmp17, xmask) tl.store(out_ptr3 + (x2), tmp23, xmask) tl.store(out_ptr4 + (x2), tmp25, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/2w/c2wa5tlelwq2czrn3trabgz7lu7yi4egl6cdkvai6twtfo6xajyh.py # Topologically Sorted Source Nodes: [i, sigmoid_7, mul_9, tanh_6, mul_10, c_4, tanh_7], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add] # Source node to ATen node mapping: # c_4 => add_7 # i => sigmoid # mul_10 => mul_10 # mul_9 => mul_9 # sigmoid_7 => sigmoid_7 # tanh_6 => tanh_6 # tanh_7 => tanh_7 # Graph fragment: # %sigmoid : [num_users=4] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm,), kwargs = {}) # %sigmoid_7 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_9,), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_7, %add_5), kwargs = {}) # %tanh_6 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_11,), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh_6), kwargs = {}) # %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_9, %mul_10), kwargs = {}) # %tanh_7 : [num_users=2] 
= call_function[target=torch.ops.aten.tanh.default](args = (%add_7,), kwargs = {}) triton_poi_fused_add_mul_sigmoid_tanh_3 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_tanh_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_tanh_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_tanh_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (12*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (12*x1)), xmask) tmp6 = tl.load(in_ptr0 + (8 + x0 + (12*x1)), xmask) tmp7 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (8 + x0 + (12*x1)), xmask) tmp12 = tl.load(in_ptr3 + (x2), xmask) tmp14 = tl.load(in_ptr4 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = libdevice.tanh(tmp10) tmp13 = tmp5 * tmp12 tmp15 = tl.sigmoid(tmp14) tmp16 = tmp15 * tmp11 tmp17 = tmp13 + tmp16 tmp18 = libdevice.tanh(tmp17) tl.store(out_ptr0 + (x2), tmp5, xmask) tl.store(out_ptr1 + (x2), tmp11, xmask) tl.store(out_ptr2 + (x2), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/u7/cu75wdxs4ehpgxtyx6nb7kbcn276exsymegvoyw4nua5mllpmipl.py # Topologically Sorted Source Nodes: [sigmoid_8], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid_8 => sigmoid_8 # Graph fragment: # %sigmoid_8 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_10,), kwargs = {}) triton_poi_fused_sigmoid_4 = async_compile.triton('triton_poi_fused_sigmoid_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + (12*x1)), xmask) tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (4 + x0 + (12*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tl.store(out_ptr0 + (x2), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/2b/c2bnwodkn3ggi3up2ygnpwziyrjmzsq67pvnsaqaijzgjqkn3l2f.py # Topologically Sorted Source Nodes: [c_n], Original ATen: [aten.stack] # Source node to ATen node mapping: # c_n => cat_1 # Graph fragment: # %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%add_1, %add_3, %add_5, %add_7],), kwargs = {}) triton_poi_fused_stack_5 = async_compile.triton('triton_poi_fused_stack_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4*x1)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + (4*((-4) + x1))), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + (4*((-8) + x1))), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tl.load(in_ptr3 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0) tmp20 = tl.load(in_ptr2 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0) tmp21 = tmp19 * tmp20 tmp22 = tl.load(in_ptr4 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0) tmp23 = tl.sigmoid(tmp22) tmp24 = tl.load(in_ptr5 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0) tmp25 = tmp23 * tmp24 tmp26 = tmp21 + tmp25 tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp16, tmp26, tmp27) tmp29 = tl.where(tmp14, tmp15, tmp28) tmp30 = tl.where(tmp9, tmp10, tmp29) tmp31 = tl.where(tmp4, tmp5, tmp30) tl.store(out_ptr0 + (x2), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7v/c7vzfzfugsasaoi62cwx45au5eeuftkul3wi2lpzxsy6petydqdo.py # Topologically Sorted Source Nodes: [h_n], Original ATen: [aten.stack] # Source node to ATen node mapping: # h_n => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul_2, %mul_5, %mul_8, %mul_11],), kwargs = {}) triton_poi_fused_stack_6 = async_compile.triton('triton_poi_fused_stack_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_stack_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4*x1)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + (4*((-4) + x1))), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + (4*((-8) + x1))), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tl.load(in_ptr3 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0) tmp20 = tl.load(in_ptr4 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0) tmp21 = tmp19 * tmp20 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp16, tmp21, tmp22) tmp24 = tl.where(tmp14, tmp15, tmp23) tmp25 = tl.where(tmp9, tmp10, tmp24) tmp26 = tl.where(tmp4, tmp5, tmp25) tl.store(out_ptr0 + (x2), tmp26, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (12, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, 12), (12, 1)) assert_size_stride(primals_7, (4, 12), (12, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_0], Original ATen: [aten.zero] stream0 = get_raw_stream(0) triton_poi_fused_zero_0.run(buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [addmm], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_3, (4, 4), (0, 1), 0), primals_5, primals_4, alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf2 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, primals_6, out=buf2) buf3 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 0), primals_7, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [i, sigmoid_1, mul, tanh, mul_1, c_1, sigmoid_2, tanh_1, h_1], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add, aten.sigmoid_backward] triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1.run(buf2, primals_2, buf3, buf1, buf4, buf5, buf30, buf6, buf7, 16, grid=grid(16), stream=stream0) buf8 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf7, primals_6, out=buf8) buf9 = buf2; del buf2 # reuse # 
Topologically Sorted Source Nodes: [mm_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 4), primals_7, out=buf9) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [i, sigmoid_3, mul_3, tanh_2, mul_4, c_2, sigmoid_4, tanh_3, h_2], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add] triton_poi_fused_add_mul_sigmoid_tanh_2.run(buf8, primals_2, buf9, buf5, buf1, buf10, buf11, buf12, buf13, buf14, 16, grid=grid(16), stream=stream0) buf15 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf14, primals_6, out=buf15) buf16 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [mm_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 8), primals_7, out=buf16) buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [i, sigmoid_5, mul_6, tanh_4, mul_7, c_3, sigmoid_6, tanh_5, h_3], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add] triton_poi_fused_add_mul_sigmoid_tanh_2.run(buf15, primals_2, buf16, buf12, buf1, buf17, buf18, buf19, buf20, buf21, 16, grid=grid(16), stream=stream0) buf22 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf21, primals_6, out=buf22) buf23 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [mm_3], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 12), primals_7, out=buf23) del primals_7 buf24 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf25 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf27 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [i, sigmoid_7, mul_9, tanh_6, mul_10, c_4, tanh_7], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add] triton_poi_fused_add_mul_sigmoid_tanh_3.run(buf22, primals_2, buf23, buf19, buf1, buf24, buf25, buf27, 16, grid=grid(16), stream=stream0) buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_8], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_4.run(buf22, primals_2, buf23, buf26, 16, grid=grid(16), stream=stream0) del buf22 del buf23 del primals_2 buf28 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [c_n], Original ATen: [aten.stack] triton_poi_fused_stack_5.run(buf5, buf12, buf19, buf24, buf1, buf25, buf28, 64, grid=grid(64), stream=stream0) buf29 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_n], Original ATen: [aten.stack] triton_poi_fused_stack_6.run(buf7, buf14, buf21, buf26, buf27, buf29, 64, grid=grid(64), stream=stream0) return (reinterpret_tensor(buf29, (4, 4, 4), (4, 16, 1), 0), reinterpret_tensor(buf28, (4, 4, 4), (4, 16, 1), 0), buf0, buf1, buf4, buf5, buf6, buf10, buf11, buf12, buf13, buf17, buf18, buf19, buf20, buf24, buf25, buf26, buf27, reinterpret_tensor(primals_1, (4, 4), (1, 16), 12), 
reinterpret_tensor(primals_6, (12, 4), (1, 12), 0), reinterpret_tensor(buf21, (4, 4), (1, 4), 0), reinterpret_tensor(primals_1, (4, 4), (1, 16), 8), reinterpret_tensor(buf14, (4, 4), (1, 4), 0), reinterpret_tensor(primals_1, (4, 4), (1, 16), 4), reinterpret_tensor(buf7, (4, 4), (1, 4), 0), buf30, reinterpret_tensor(primals_1, (4, 4), (1, 16), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
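Because the trace ran with seq_len = 4, Inductor fully unrolls the Python time loop of the eager module shown below: buf7, buf14 and buf21 hold h_1..h_3, the static input gate i lives in buf1 and is reused by every step's fused kernel, and the last step's h_4 = sigmoid(o) * tanh(c_4) is folded into the final stack kernel as buf26 * buf27. One unrolled step in plain PyTorch, following the eager forward (a sketch; argument names are illustrative):

import torch

def ea_lstm_step(x_t, h, c, i, weight_ih, weight_hh, bias):
    # i is the static input gate, computed once from x_s before the loop
    gates = torch.addmm(bias, h, weight_hh) + x_t @ weight_ih
    f, o, g = gates.chunk(3, 1)
    c_new = torch.sigmoid(f) * c + i * torch.tanh(g)
    h_new = torch.sigmoid(o) * torch.tanh(c_new)
    return h_new, c_new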
import torch from typing import Tuple import torch.nn as nn class EALSTM(nn.Module): """Implementation of the Entity-Aware-LSTM (EA-LSTM) TODO: Include paper ref and latex equations Parameters ---------- input_size_dyn : int Number of dynamic features, which are those passed to the LSTM at each time step. input_size_stat : int Number of static features, which are those that are used to modulate the input gate. hidden_size : int Number of hidden/memory cells. batch_first : bool, optional If True, expects the batch inputs to be of shape [batch, seq, features]; otherwise, the shape has to be [seq, batch, features], by default True. initial_forget_bias : int, optional Value of the initial forget gate bias, by default 0 """ def __init__(self, input_size_dyn: 'int', input_size_stat: 'int', hidden_size: 'int', batch_first: 'bool'=True, initial_forget_bias: 'int'=0): super(EALSTM, self).__init__() self.input_size_dyn = input_size_dyn self.input_size_stat = input_size_stat self.hidden_size = hidden_size self.batch_first = batch_first self.initial_forget_bias = initial_forget_bias self.weight_ih = nn.Parameter(torch.FloatTensor(input_size_dyn, 3 * hidden_size)) self.weight_hh = nn.Parameter(torch.FloatTensor(hidden_size, 3 * hidden_size)) self.weight_sh = nn.Parameter(torch.FloatTensor(input_size_stat, hidden_size)) self.bias = nn.Parameter(torch.FloatTensor(3 * hidden_size)) self.bias_s = nn.Parameter(torch.FloatTensor(hidden_size)) self.reset_parameters() def reset_parameters(self): """Initialize all learnable parameters of the LSTM""" nn.init.orthogonal_(self.weight_ih.data) nn.init.orthogonal_(self.weight_sh) weight_hh_data = torch.eye(self.hidden_size) weight_hh_data = weight_hh_data.repeat(1, 3) self.weight_hh.data = weight_hh_data nn.init.constant_(self.bias.data, val=0) nn.init.constant_(self.bias_s.data, val=0) if self.initial_forget_bias != 0: self.bias.data[:self.hidden_size] = self.initial_forget_bias def forward(self, x_d: 'torch.Tensor', x_s: 'torch.Tensor') ->Tuple[ torch.Tensor, torch.Tensor]: """Perform a forward pass of the EA-LSTM over a full input sequence. Parameters ---------- x_d : torch.Tensor Tensor containing a batch of sequences of the dynamic features. Shape has to match the format specified with batch_first. x_s : torch.Tensor Tensor containing a batch of static features. Returns ------- h_n : torch.Tensor The hidden states of each time step of each sample in the batch. c_n : torch.Tensor The cell states of each time step of each sample in the batch. """ if self.batch_first: x_d = x_d.transpose(0, 1) seq_len, batch_size, _ = x_d.size() h_0 = x_d.data.new(batch_size, self.hidden_size).zero_() c_0 = x_d.data.new(batch_size, self.hidden_size).zero_() h_x = h_0, c_0 h_n, c_n = [], [] bias_batch = self.bias.unsqueeze(0).expand(batch_size, *self.bias. size()) bias_s_batch = self.bias_s.unsqueeze(0).expand(batch_size, *self. bias_s.size()) i = torch.sigmoid(torch.addmm(bias_s_batch, x_s, self.weight_sh)) for t in range(seq_len): h_0, c_0 = h_x gates = torch.addmm(bias_batch, h_0, self.weight_hh) + torch.mm(x_d [t], self.weight_ih) f, o, g = gates.chunk(3, 1) c_1 = torch.sigmoid(f) * c_0 + i * torch.tanh(g) h_1 = torch.sigmoid(o) * torch.tanh(c_1) h_n.append(h_1) c_n.append(c_1) h_x = h_1, c_1 h_n = torch.stack(h_n, 0) c_n = torch.stack(c_n, 0) if self.batch_first: h_n = h_n.transpose(0, 1) c_n = c_n.transpose(0, 1) return h_n, c_n def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_size_dyn': 4, 'input_size_stat': 4, 'hidden_size': 4}]
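The docstring above defers the equations to a TODO; read directly off the forward pass, the recurrence is (a hedged reconstruction using the module's own parameter names, with weights applied on the right as in the code):

\begin{aligned}
i &= \sigma(x_s W_{sh} + b_s) \quad \text{(static input gate, computed once)} \\
[f_t \;\; o_t \;\; g_t] &= h_{t-1} W_{hh} + x_t^{d} W_{ih} + b \\
c_t &= \sigma(f_t) \odot c_{t-1} + i \odot \tanh(g_t) \\
h_t &= \sigma(o_t) \odot \tanh(c_t)
\end{aligned}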
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_zero_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask) tmp1 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (8 + x0 + 12 * x1), xmask) tmp6 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (x0 + 12 * x1), xmask) tmp14 = tl.load(in_ptr3 + x2, xmask) tmp21 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask) tmp22 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (4 + x0 + 12 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = libdevice.tanh(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = tl.sigmoid(tmp10) tmp12 = 0.0 tmp13 = tmp11 * tmp12 tmp15 = tl.sigmoid(tmp14) tmp16 = tmp15 * tmp5 tmp17 = tmp13 + tmp16 tmp18 = 1.0 tmp19 = tmp18 - tmp11 tmp20 = tmp11 * tmp19 tmp23 = tmp21 + tmp22 tmp25 = tmp23 + tmp24 tmp26 = tl.sigmoid(tmp25) tmp27 = libdevice.tanh(tmp17) tmp28 = tmp26 * tmp27 tl.store(out_ptr0 + x2, tmp5, xmask) tl.store(out_ptr1 + x2, tmp17, xmask) tl.store(out_ptr2 + x2, tmp20, xmask) tl.store(out_ptr3 + x2, tmp26, xmask) tl.store(out_ptr4 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 12 * x1), xmask) tmp6 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask) tmp7 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (8 + x0 + 12 * x1), xmask) tmp12 = tl.load(in_ptr3 + x2, xmask) tmp14 = tl.load(in_ptr4 + x2, xmask) tmp18 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask) tmp19 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr2 + (4 + x0 + 12 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = libdevice.tanh(tmp10) tmp13 = tmp5 * tmp12 tmp15 = tl.sigmoid(tmp14) tmp16 = tmp15 * tmp11 tmp17 = tmp13 + tmp16 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp23 = tl.sigmoid(tmp22) tmp24 = libdevice.tanh(tmp17) tmp25 = tmp23 * tmp24 
    tl.store(out_ptr0 + x2, tmp5, xmask)
    tl.store(out_ptr1 + x2, tmp11, xmask)
    tl.store(out_ptr2 + x2, tmp17, xmask)
    tl.store(out_ptr3 + x2, tmp23, xmask)
    tl.store(out_ptr4 + x2, tmp25, xmask)


@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_3(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x0 + 12 * x1), xmask)
    tmp6 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask)
    tmp7 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + (8 + x0 + 12 * x1), xmask)
    tmp12 = tl.load(in_ptr3 + x2, xmask)
    tmp14 = tl.load(in_ptr4 + x2, xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl.sigmoid(tmp4)
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 + tmp9
    tmp11 = libdevice.tanh(tmp10)
    tmp13 = tmp5 * tmp12
    tmp15 = tl.sigmoid(tmp14)
    tmp16 = tmp15 * tmp11
    tmp17 = tmp13 + tmp16
    tmp18 = libdevice.tanh(tmp17)
    tl.store(out_ptr0 + x2, tmp5, xmask)
    tl.store(out_ptr1 + x2, tmp11, xmask)
    tl.store(out_ptr2 + x2, tmp18, xmask)


@triton.jit
def triton_poi_fused_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (4 + x0 + 12 * x1), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl.sigmoid(tmp4)
    tl.store(out_ptr0 + x2, tmp5, xmask)


@triton.jit
def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x0 = xindex % 4
    x2 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1)), tmp9 & xmask, other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-8 + x1)), tmp14 & xmask, other=0.0)
    tmp16 = tmp0 >= tmp12
    tl.full([1], 16, tl.int64)
    tmp19 = tl.load(in_ptr3 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
    tmp20 = tl.load(in_ptr2 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
    tmp21 = tmp19 * tmp20
    tmp22 = tl.load(in_ptr4 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
    tmp23 = tl.sigmoid(tmp22)
    tmp24 = tl.load(in_ptr5 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
    tmp25 = tmp23 * tmp24
    tmp26 = tmp21 + tmp25
    tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
    tmp28 = tl.where(tmp16, tmp26, tmp27)
    tmp29 = tl.where(tmp14, tmp15, tmp28)
    tmp30 = tl.where(tmp9, tmp10, tmp29)
    tmp31 = tl.where(tmp4, tmp5, tmp30)
    tl.store(out_ptr0 + x2, tmp31, xmask)


@triton.jit
def triton_poi_fused_stack_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x0 = xindex % 4
    x2 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1)), tmp9 & xmask, other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-8 + x1)), tmp14 & xmask, other=0.0)
    tmp16 = tmp0 >= tmp12
    tl.full([1], 16, tl.int64)
    tmp19 = tl.load(in_ptr3 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
    tmp20 = tl.load(in_ptr4 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
    tmp21 = tmp19 * tmp20
    tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
    tmp23 = tl.where(tmp16, tmp21, tmp22)
    tmp24 = tl.where(tmp14, tmp15, tmp23)
    tmp25 = tl.where(tmp9, tmp10, tmp24)
    tmp26 = tl.where(tmp4, tmp5, tmp25)
    tl.store(out_ptr0 + x2, tmp26, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (12,), (1,))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4, 12), (12, 1))
    assert_size_stride(primals_7, (4, 12), (12, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_zero_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(reinterpret_tensor(primals_3, (4, 4), (0, 1),
            0), primals_5, primals_4, alpha=1, beta=1, out=buf1)
        del primals_3
        del primals_4
        buf2 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
        extern_kernels.mm(buf0, primals_6, out=buf2)
        buf3 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 0),
            primals_7, out=buf3)
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1[grid(16)](
            buf2, primals_2, buf3, buf1, buf4, buf5, buf30, buf6, buf7, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf8 = buf3
        del buf3
        extern_kernels.mm(buf7, primals_6, out=buf8)
        buf9 = buf2
        del buf2
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 4),
            primals_7, out=buf9)
        buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_add_mul_sigmoid_tanh_2[grid(16)](buf8, primals_2,
            buf9, buf5, buf1, buf10, buf11, buf12, buf13, buf14, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf15 = buf9
        del buf9
        extern_kernels.mm(buf14, primals_6, out=buf15)
        buf16 = buf8
        del buf8
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 8),
            primals_7, out=buf16)
        buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_add_mul_sigmoid_tanh_2[grid(16)](buf15, primals_2,
            buf16, buf12, buf1, buf17, buf18, buf19, buf20, buf21, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf22 = buf16
        del buf16
        extern_kernels.mm(buf21, primals_6, out=buf22)
        buf23 = buf15
        del buf15
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1),
            12), primals_7, out=buf23)
        del primals_7
        buf24 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf25 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf27 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_add_mul_sigmoid_tanh_3[grid(16)](buf22, primals_2,
            buf23, buf19, buf1, buf24, buf25, buf27, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_sigmoid_4[grid(16)](buf22, primals_2, buf23,
            buf26, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del buf22
        del buf23
        del primals_2
        buf28 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        triton_poi_fused_stack_5[grid(64)](buf5, buf12, buf19, buf24, buf1,
            buf25, buf28, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf29 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        triton_poi_fused_stack_6[grid(64)](buf7, buf14, buf21, buf26,
            buf27, buf29, 64, XBLOCK=64, num_warps=1, num_stages=1)
    return (reinterpret_tensor(buf29, (4, 4, 4), (4, 16, 1), 0),
        reinterpret_tensor(buf28, (4, 4, 4), (4, 16, 1), 0), buf0, buf1,
        buf4, buf5, buf6, buf10, buf11, buf12, buf13, buf17, buf18, buf19,
        buf20, buf24, buf25, buf26, buf27, reinterpret_tensor(primals_1,
        (4, 4), (1, 16), 12), reinterpret_tensor(primals_6, (12, 4), (1,
        12), 0), reinterpret_tensor(buf21, (4, 4), (1, 4), 0),
        reinterpret_tensor(primals_1, (4, 4), (1, 16), 8),
        reinterpret_tensor(buf14, (4, 4), (1, 4), 0), reinterpret_tensor(
        primals_1, (4, 4), (1, 16), 4), reinterpret_tensor(buf7, (4, 4),
        (1, 4), 0), buf30, reinterpret_tensor(primals_1, (4, 4), (1, 16),
        0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0))


class EALSTMNew(nn.Module):
    """Implementation of the Entity-Aware-LSTM (EA-LSTM)

    TODO: Include paper ref and latex equations

    Parameters
    ----------
    input_size_dyn : int
        Number of dynamic features, which are those passed to the LSTM at
        each time step.
    input_size_stat : int
        Number of static features, which are those used to modulate the
        input gate.
    hidden_size : int
        Number of hidden/memory cells.
    batch_first : bool, optional
        If True, expects the batch inputs to be of shape
        [batch, seq, features]; otherwise, the shape has to be
        [seq, batch, features], by default True.
    initial_forget_bias : int, optional
        Value of the initial forget gate bias, by default 0
    """

    def __init__(self, input_size_dyn: 'int', input_size_stat: 'int',
        hidden_size: 'int', batch_first: 'bool'=True, initial_forget_bias:
        'int'=0):
        super(EALSTMNew, self).__init__()
        self.input_size_dyn = input_size_dyn
        self.input_size_stat = input_size_stat
        self.hidden_size = hidden_size
        self.batch_first = batch_first
        self.initial_forget_bias = initial_forget_bias
        self.weight_ih = nn.Parameter(torch.FloatTensor(input_size_dyn, 3 *
            hidden_size))
        self.weight_hh = nn.Parameter(torch.FloatTensor(hidden_size, 3 *
            hidden_size))
        self.weight_sh = nn.Parameter(torch.FloatTensor(input_size_stat,
            hidden_size))
        self.bias = nn.Parameter(torch.FloatTensor(3 * hidden_size))
        self.bias_s = nn.Parameter(torch.FloatTensor(hidden_size))
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize all learnable parameters of the LSTM"""
        nn.init.orthogonal_(self.weight_ih.data)
        nn.init.orthogonal_(self.weight_sh)
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 3)
        self.weight_hh.data = weight_hh_data
        nn.init.constant_(self.bias.data, val=0)
        nn.init.constant_(self.bias_s.data, val=0)
        if self.initial_forget_bias != 0:
            self.bias.data[:self.hidden_size] = self.initial_forget_bias

    def forward(self, input_0, input_1):
        primals_6 = self.weight_ih
        primals_7 = self.weight_hh
        primals_4 = self.weight_sh
        primals_2 = self.bias
        primals_3 = self.bias_s
        primals_1 = input_0
        primals_5 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0], output[1]
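# --- Added usage sketch (not part of the original record) ---
# A minimal smoke test for EALSTMNew, assuming a CUDA device is available.
# The shapes mirror the assert_size_stride guards in call(): a
# [batch=4, seq=4, features=4] dynamic input (batch_first=True) and a
# [4, 4] static input, with hidden_size=4 so weight_ih/weight_hh are (4, 12).
# The two returned tensors appear to be the hidden and cell states stacked
# over the four time steps.
import torch

model = EALSTMNew(input_size_dyn=4, input_size_stat=4, hidden_size=4).cuda()
x_d = torch.rand(4, 4, 4, device='cuda')  # dynamic features, one slice per step
x_s = torch.rand(4, 4, device='cuda')     # static features modulating the input gate
h_n, c_n = model(x_d, x_s)                # each has shape (4, 4, 4)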
danielsuo/toy_flood
EALSTM
false
15,128
[ "MIT" ]
49
471d3c4091d86d4a00fbf910937d4e60fdaf79a1
https://github.com/danielsuo/toy_flood/tree/471d3c4091d86d4a00fbf910937d4e60fdaf79a1
FMul
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/gi/cgijl5qj5bhfo7lmgxbifejwa2bfnmp26pa6hvoyiodkxyzz2q6t.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # x => mul # x_1 => mul_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 10.0), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = tmp0 * tmp1 tmp3 = 
10.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class FMul(nn.Module):

    def __init__(self):
        super(FMul, self).__init__()

    def forward(self, x, y):
        x = x * y
        x = x * 10.0
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 * tmp1
    tmp3 = 10.0
    tmp4 = tmp2 * tmp3
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(256)](arg0_1, arg1_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class FMulNew(nn.Module):

    def __init__(self):
        super(FMulNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
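# --- Added equivalence sketch (not from the source repo) ---
# The fused kernel performs the same two fp32 multiplies as the eager FMul
# module, in the same order, so the outputs should agree. Requires CUDA.
import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
y = torch.rand(4, 4, 4, 4, device='cuda')
fused = FMulNew()(x, y)  # one fused Triton kernel launch
eager = x * y * 10.0     # reference semantics of FMul.forward
assert torch.allclose(fused, eager)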
dawnclaude/onnx2keras
FMul
false
15,129
[ "MIT" ]
115
3d2a47c0a228b91fd434232274e216e491da36e3
https://github.com/dawnclaude/onnx2keras/tree/3d2a47c0a228b91fd434232274e216e491da36e3
GroupWiseLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/lv/clvhqra465e7rlmwkbu5xr6rfzlyh72yhs7d3zsjnfgsh2neujk4.py # Topologically Sorted Source Nodes: [mul, x, x_1], Original ATen: [aten.mul, aten.sum, aten.add] # Source node to ATen node mapping: # mul => mul # x => sum_1 # x_1 => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %primals_3), kwargs = {}) triton_poi_fused_add_mul_sum_0 = async_compile.triton('triton_poi_fused_add_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, x, x_1], Original ATen: [aten.mul, aten.sum, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_sum_0.run(primals_1, primals_2, primals_3, buf0, 64, grid=grid(64), stream=stream0) del primals_1 del primals_3 return (buf0, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed


class GroupWiseLinear(nn.Module):

    def __init__(self, num_class, hidden_dim, bias=True):
        super().__init__()
        self.num_class = num_class
        self.hidden_dim = hidden_dim
        self.bias = bias
        self.W = nn.Parameter(torch.Tensor(1, num_class, hidden_dim))
        if bias:
            self.b = nn.Parameter(torch.Tensor(1, num_class))
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.W.size(2))
        for i in range(self.num_class):
            self.W[0][i].data.uniform_(-stdv, stdv)
        if self.bias:
            for i in range(self.num_class):
                self.b[0][i].data.uniform_(-stdv, stdv)

    def forward(self, x):
        x = (self.W * x).sum(-1)
        if self.bias:
            x = x + self.b
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_class': 4, 'hidden_dim': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 * tmp12
    tmp14 = tmp10 + tmp13
    tmp16 = tmp14 + tmp15
    tl.store(out_ptr0 + x2, tmp16, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (1, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_sum_0[grid(64)](primals_1, primals_2,
            primals_3, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_1
        del primals_3
    return buf0, primals_2


class GroupWiseLinearNew(nn.Module):

    def __init__(self, num_class, hidden_dim, bias=True):
        super().__init__()
        self.num_class = num_class
        self.hidden_dim = hidden_dim
        self.bias = bias
        self.W = nn.Parameter(torch.Tensor(1, num_class, hidden_dim))
        if bias:
            self.b = nn.Parameter(torch.Tensor(1, num_class))
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.W.size(2))
        for i in range(self.num_class):
            self.W[0][i].data.uniform_(-stdv, stdv)
        if self.bias:
            for i in range(self.num_class):
                self.b[0][i].data.uniform_(-stdv, stdv)

    def forward(self, input_0):
        primals_1 = self.W
        primals_3 = self.b
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
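# --- Added reference sketch (not from the source repo) ---
# What the fused kernel computes, spelled out in eager PyTorch: a
# class-specific dot product over the hidden dimension,
#     out[..., c] = sum_d W[0, c, d] * x[..., c, d] + b[0, c],
# with the d-loop (hidden_dim=4) unrolled into the tmp0..tmp16 chain above.
import torch


def groupwise_linear_eager(W, x, b):
    # W: (1, num_class, hidden_dim), x: (..., num_class, hidden_dim),
    # b: (1, num_class); equivalent to (W * x).sum(-1) + b.
    return torch.einsum('...cd,cd->...c', x, W[0]) + b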
davidaderup/query2labels
GroupWiseLinear
false
15,130
[ "MIT" ]
164
5a10c861dda85d94ba01ec6ad4119eef67a9f441
https://github.com/davidaderup/query2labels/tree/5a10c861dda85d94ba01ec6ad4119eef67a9f441
EncoderImagePrecomp
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/q5/cq5voi6tqfyk3zzxnl6i5kre2q2bl3s5mpbahmi6iecgg42j3jri.py # Topologically Sorted Source Nodes: [norm, clamp, features_1], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div] # Source node to ATen node mapping: # clamp => clamp_min # features_1 => div # norm => pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, %clamp_min), kwargs = {}) triton_poi_fused_clamp_div_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_clamp_div_linalg_vector_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-06 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [features], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [norm, clamp, features_1], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_clamp_div_linalg_vector_norm_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) return (buf1, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
from collections import OrderedDict
import torch.nn as nn
import torch.nn.init


def l2norm(x, dim=-1):
    return x / x.norm(2, dim=dim, keepdim=True).clamp(min=1e-06)


class EncoderImagePrecomp(nn.Module):
    """ image encoder """

    def __init__(self, img_dim, embed_size, no_imgnorm=False):
        super(EncoderImagePrecomp, self).__init__()
        self.embed_size = embed_size
        self.no_imgnorm = no_imgnorm
        self.fc = nn.Linear(img_dim, embed_size)
        self.init_weights()

    def init_weights(self):
        """ Xavier initialization for the fully connected layer """
        r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features)
        self.fc.weight.data.uniform_(-r, r)
        self.fc.bias.data.fill_(0)

    def forward(self, images):
        """ extract image feature vectors """
        features = self.fc(images.float())
        if not self.no_imgnorm:
            features = l2norm(features)
        return features

    def load_state_dict(self, state_dict):
        """ copies parameters, overwriting the default one to accept
        state_dict from Full model """
        own_state = self.state_dict()
        new_state = OrderedDict()
        for name, param in state_dict.items():
            if name in own_state:
                new_state[name] = param
        super(EncoderImagePrecomp, self).load_state_dict(new_state)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'img_dim': 4, 'embed_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
from collections import OrderedDict
import torch.nn as nn
import torch.nn.init

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-06
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x2, tmp15, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_div_linalg_vector_norm_0[grid(256)](buf0,
            buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
    return buf1, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0


def l2norm(x, dim=-1):
    return x / x.norm(2, dim=dim, keepdim=True).clamp(min=1e-06)


class EncoderImagePrecompNew(nn.Module):
    """ image encoder """

    def __init__(self, img_dim, embed_size, no_imgnorm=False):
        super(EncoderImagePrecompNew, self).__init__()
        self.embed_size = embed_size
        self.no_imgnorm = no_imgnorm
        self.fc = nn.Linear(img_dim, embed_size)
        self.init_weights()

    def init_weights(self):
        """ Xavier initialization for the fully connected layer """
        r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features)
        self.fc.weight.data.uniform_(-r, r)
        self.fc.bias.data.fill_(0)

    def load_state_dict(self, state_dict):
        """ copies parameters, overwriting the default one to accept
        state_dict from Full model """
        own_state = self.state_dict()
        new_state = OrderedDict()
        for name, param in state_dict.items():
            if name in own_state:
                new_state[name] = param
        super(EncoderImagePrecompNew, self).load_state_dict(new_state)

    def forward(self, input_0):
        primals_2 = self.fc.weight
        primals_3 = self.fc.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
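# --- Added usage sketch (not from the source repo) ---
# EncoderImagePrecompNew(4, 4) runs the fc layer through extern_kernels.addmm
# and the l2norm through the fused clamp/div/vector-norm kernel; the unrolled
# tmp2..tmp11 chain above is the squared norm over the last (embed_size=4)
# axis. Requires CUDA.
import torch

enc = EncoderImagePrecompNew(img_dim=4, embed_size=4).cuda()
feats = enc(torch.rand(4, 4, 4, 4, device='cuda'))
print(feats.norm(2, dim=-1))  # ~1.0 everywhere: rows are unit-norm up to the 1e-06 clamp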
davidatbu/MLVGSNL
EncoderImagePrecomp
false
15,131
[ "MIT" ]
97
88d42424a0a7badb43e22cd3950948c9522faaa1
https://github.com/davidatbu/MLVGSNL/tree/88d42424a0a7badb43e22cd3950948c9522faaa1
FDiv
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/r6/cr6v6otvbfgqjmrb3g3asbnqvioekxyhkvn4f2szyruy2xcensbv.py # Topologically Sorted Source Nodes: [x, y, x_1], Original ATen: [aten.div] # Source node to ATen node mapping: # x => div # x_1 => div_2 # y => div_1 # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, 2), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg1_1, 2), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div, %div_1), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < 
xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr1 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 / tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x, y, x_1], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class FDiv(nn.Module):

    def __init__(self):
        super(FDiv, self).__init__()

    def forward(self, x, y):
        x = x / 2
        y = y / 2
        x = x / y
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_div_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp3 = tl.load(in_ptr1 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp5 = tmp2 / tmp4
    tl.store(out_ptr0 + x0, tmp5, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_0[grid(256)](arg0_1, arg1_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class FDivNew(nn.Module):

    def __init__(self):
        super(FDivNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
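# --- Added observation + check (not from the source repo) ---
# Mathematically (x / 2) / (y / 2) == x / y, but the kernel keeps both
# halvings: inductor rewrites each "/ 2" as "* 0.5" (a safe strength
# reduction) without cancelling the two scalings against each other.
# Requires CUDA.
import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
y = torch.rand(4, 4, 4, 4, device='cuda')
out = FDivNew()(x, y)
assert torch.allclose(out, x / y)  # *0.5 is exact in fp32, so this holds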
dawnclaude/onnx2keras
FDiv
false
15,132
[ "MIT" ]
115
3d2a47c0a228b91fd434232274e216e491da36e3
https://github.com/dawnclaude/onnx2keras/tree/3d2a47c0a228b91fd434232274e216e491da36e3
img_encoder
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ji/cjiygmucezo6n7zsjqtl32j5ki6pmpzwphrqovziyzrncllqs3nk.py # Topologically Sorted Source Nodes: [sub], Original ATen: [aten.rsub] # Source node to ATen node mapping: # sub => sub # Graph fragment: # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %primals_1), kwargs = {}) triton_poi_fused_rsub_0 = async_compile.triton('triton_poi_fused_rsub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_rsub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_rsub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tl.store(out_ptr0 + (x0), tmp2, None) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/qp/cqpnzdbwnsefjalpdd3af24e6rkuiqujtytyh2uiyopqxl7da6qt.py # Topologically Sorted Source Nodes: [layer_0, layer_1], Original ATen: [aten._native_batch_norm_legit, aten.leaky_relu] # Source node to ATen node mapping: # layer_0 => add, rsqrt, var_mean # layer_1 => gt, mul_1, where # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.02), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul_1), kwargs = {}) triton_per_fused__native_batch_norm_legit_leaky_relu_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_leaky_relu_1(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel): xnumel = 16 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = tmp0 - tmp8 tmp15 = 1024.0 tmp16 = tmp13 / tmp15 
tmp17 = 1e-05 tmp18 = tmp16 + tmp17 tmp19 = libdevice.rsqrt(tmp18) tmp20 = tmp14 * tmp19 tmp21 = 0.0 tmp22 = tmp20 > tmp21 tmp23 = 0.02 tmp24 = tmp20 * tmp23 tmp25 = tl.where(tmp22, tmp20, tmp24) tl.store(out_ptr2 + (r1 + (1024*x0)), tmp25, None) tl.store(out_ptr3 + (x0), tmp19, None) tl.store(out_ptr0 + (x0), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/uu/cuujr546ffrwy4u3ex3eenwldrf3vpm3rw3p2dwjooqfyutaoehu.py # Topologically Sorted Source Nodes: [output_2, output_3, output_4], Original ATen: [aten._native_batch_norm_legit, aten.add, aten.leaky_relu] # Source node to ATen node mapping: # output_2 => add_2, rsqrt_2, var_mean_2 # output_3 => add_3 # output_4 => gt_2, mul_5, where_2 # Graph fragment: # %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_10, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {}) # %rsqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %view_3), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_3, 0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, 0.02), kwargs = {}) # %where_2 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %add_3, %mul_5), kwargs = {}) triton_per_fused__native_batch_norm_legit_add_leaky_relu_2 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_leaky_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_add_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel): xnumel = 16 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = 
tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None) tmp21 = tl.load(in_ptr1 + (r1 + (1024*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = tmp0 - tmp8 tmp15 = 1024.0 tmp16 = tmp13 / tmp15 tmp17 = 1e-05 tmp18 = tmp16 + tmp17 tmp19 = libdevice.rsqrt(tmp18) tmp20 = tmp14 * tmp19 tmp22 = tmp20 + tmp21 tmp23 = 0.0 tmp24 = tmp22 > tmp23 tmp25 = 0.02 tmp26 = tmp22 * tmp25 tmp27 = tl.where(tmp24, tmp22, tmp26) tl.store(out_ptr2 + (r1 + (1024*x0)), tmp27, None) tl.store(out_ptr3 + (x0), tmp19, None) tl.store(out_ptr0 + (x0), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/m3/cm3cjhcayr4gbu3r5spvok2tmexnin3673rzrytnhxb6rljrg7ms.py # Topologically Sorted Source Nodes: [output_10, output_11], Original ATen: [aten._native_batch_norm_legit, aten.leaky_relu] # Source node to ATen node mapping: # output_10 => add_7, rsqrt_5, var_mean_5 # output_11 => gt_5, mul_11, where_5 # Graph fragment: # %var_mean_5 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_19, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_10, 1e-05), kwargs = {}) # %rsqrt_5 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_7,), kwargs = {}) # %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_20, 0), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_20, 0.02), kwargs = {}) # %where_5 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %view_20, %mul_11), kwargs = {}) triton_per_fused__native_batch_norm_legit_leaky_relu_3 = async_compile.triton('triton_per_fused__native_batch_norm_legit_leaky_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[32, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_leaky_relu_3(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel): xnumel = 32 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (256*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = tmp0 - tmp8 tmp15 = 256.0 tmp16 = tmp13 / tmp15 tmp17 = 1e-05 tmp18 = tmp16 + tmp17 tmp19 = libdevice.rsqrt(tmp18) tmp20 = tmp14 * tmp19 tmp21 = 0.0 tmp22 = tmp20 > tmp21 tmp23 = 0.02 tmp24 = tmp20 * tmp23 tmp25 = tl.where(tmp22, tmp20, tmp24) tl.store(out_ptr2 + (r1 + (256*x0)), tmp25, None) tl.store(out_ptr3 + (x0), tmp19, None) tl.store(out_ptr0 + (x0), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/2q/c2qkltn7zjyyswtdcvesaz4oj2zyh3nqogkeanudbzlr3uxovoot.py # Topologically Sorted Source Nodes: [output_12, input_, output_13, output_14], Original ATen: [aten._native_batch_norm_legit, aten.add, aten.leaky_relu] # Source node to ATen node mapping: # input_ => add_9, rsqrt_7, var_mean_7 # output_12 => add_8, rsqrt_6, var_mean_6 # output_13 => add_10 # output_14 => gt_6, mul_14, where_6 # Graph fragment: # %var_mean_6 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_24, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_12, 1e-05), kwargs = {}) # %rsqrt_6 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_8,), kwargs = {}) # %var_mean_7 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_26, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_14, 1e-05), kwargs = {}) # %rsqrt_7 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_9,), kwargs = {}) # %add_10 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_25, %view_27), kwargs = {}) # %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_10, 0), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_10, 0.02), kwargs = {}) # %where_6 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %add_10, %mul_14), kwargs = {}) triton_per_fused__native_batch_norm_legit_add_leaky_relu_4 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_leaky_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints 
import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[32, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_leaky_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 8, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_add_leaky_relu_4(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr4, out_ptr5, xnumel, rnumel): xnumel = 32 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (256*x0)), None) tmp14 = tl.load(in_ptr1 + (r1 + (256*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp17 = tl.broadcast_to(tmp15, [RBLOCK]) tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tmp20 = tmp19 / tmp7 tmp21 = tmp15 - tmp20 tmp22 = tmp21 * tmp21 tmp23 = tl.broadcast_to(tmp22, [RBLOCK]) tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0)) tmp26 = tmp0 - tmp8 tmp27 = 256.0 tmp28 = tmp13 / tmp27 tmp29 = 1e-05 tmp30 = tmp28 + tmp29 tmp31 = libdevice.rsqrt(tmp30) tmp32 = tmp26 * tmp31 tmp33 = tmp14 - tmp20 tmp34 = tmp25 / tmp27 tmp35 = tmp34 + tmp29 tmp36 = libdevice.rsqrt(tmp35) tmp37 = tmp33 * tmp36 tmp38 = tmp32 + tmp37 tmp39 = 0.0 tmp40 = tmp38 > tmp39 tmp41 = 0.02 tmp42 = tmp38 * tmp41 tmp43 = tl.where(tmp40, tmp38, tmp42) tl.store(in_out_ptr0 + (r1 + (256*x0)), tmp43, None) tl.store(out_ptr4 + (x0), tmp31, None) tl.store(out_ptr5 + (x0), tmp36, None) tl.store(out_ptr0 + (x0), tmp8, None) tl.store(out_ptr2 + (x0), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/jg/cjghb4e6e4mot6dwfdqmkyr4mbnndddatip67czkhda3k2scp4yo.py # Topologically Sorted Source Nodes: [output_17, output_18, output_19], Original ATen: [aten._native_batch_norm_legit, aten.add, aten.leaky_relu] # Source node to ATen node mapping: # output_17 => add_12, rsqrt_9, var_mean_9 # output_18 => add_13 # output_19 => gt_8, mul_18, where_8 # Graph fragment: # %var_mean_9 : [num_users=2] = 
call_function[target=torch.ops.aten.var_mean.correction](args = (%view_33, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_18, 1e-05), kwargs = {}) # %rsqrt_9 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_12,), kwargs = {}) # %add_13 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_34, %where_6), kwargs = {}) # %gt_8 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_13, 0), kwargs = {}) # %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_13, 0.02), kwargs = {}) # %where_8 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_8, %add_13, %mul_18), kwargs = {}) triton_per_fused__native_batch_norm_legit_add_leaky_relu_5 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_leaky_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[32, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_leaky_relu_5', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_add_leaky_relu_5(in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel): xnumel = 32 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (256*x0)), None) tmp21 = tl.load(in_ptr1 + (r1 + (256*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = tmp0 - tmp8 tmp15 = 256.0 tmp16 = tmp13 / tmp15 tmp17 = 1e-05 tmp18 = tmp16 + tmp17 tmp19 = libdevice.rsqrt(tmp18) tmp20 = tmp14 * tmp19 tmp22 = tmp20 + tmp21 tmp23 = 0.0 tmp24 = tmp22 
> tmp23 tmp25 = 0.02 tmp26 = tmp22 * tmp25 tmp27 = tl.where(tmp24, tmp22, tmp26) tl.store(out_ptr2 + (r1 + (256*x0)), tmp27, None) tl.store(out_ptr3 + (x0), tmp19, None) tl.store(out_ptr0 + (x0), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/zm/czmv2mu4hab64bhwtmh5jphva6l46elvtf3aizq7a4mcbmdyyf3b.py # Topologically Sorted Source Nodes: [output_20, output_21], Original ATen: [aten._native_batch_norm_legit, aten.leaky_relu] # Source node to ATen node mapping: # output_20 => add_14, rsqrt_10, var_mean_10 # output_21 => gt_9, mul_20, where_9 # Graph fragment: # %var_mean_10 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_35, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_20, 1e-05), kwargs = {}) # %rsqrt_10 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_14,), kwargs = {}) # %gt_9 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_36, 0), kwargs = {}) # %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_36, 0.02), kwargs = {}) # %where_9 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_9, %view_36, %mul_20), kwargs = {}) triton_per_fused__native_batch_norm_legit_leaky_relu_6 = async_compile.triton('triton_per_fused__native_batch_norm_legit_leaky_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_leaky_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_leaky_relu_6(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, 
[XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 64.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp24 = 0.0 tmp25 = tmp23 > tmp24 tmp26 = 0.02 tmp27 = tmp23 * tmp26 tmp28 = tl.where(tmp25, tmp23, tmp27) tl.store(out_ptr2 + (r1 + (64*x0)), tmp28, xmask) tl.store(out_ptr3 + (x0), tmp22, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bx/cbxodlbrntio5pvzj42i6wthdosrnxexccgrbybbkqkdbndno4jr.py # Topologically Sorted Source Nodes: [output_22, input__1, output_23, output_24], Original ATen: [aten._native_batch_norm_legit, aten.add, aten.leaky_relu] # Source node to ATen node mapping: # input__1 => add_16, rsqrt_12, var_mean_12 # output_22 => add_15, rsqrt_11, var_mean_11 # output_23 => add_17 # output_24 => gt_10, mul_23, where_10 # Graph fragment: # %var_mean_11 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_40, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_22, 1e-05), kwargs = {}) # %rsqrt_11 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_15,), kwargs = {}) # %var_mean_12 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_42, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_24, 1e-05), kwargs = {}) # %rsqrt_12 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_16,), kwargs = {}) # %add_17 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_41, %view_43), kwargs = {}) # %gt_10 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_17, 0), kwargs = {}) # %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_17, 0.02), kwargs = {}) # %where_10 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_10, %add_17, %mul_23), kwargs = {}) triton_per_fused__native_batch_norm_legit_add_leaky_relu_7 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_leaky_relu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_per_fused__native_batch_norm_legit_add_leaky_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 8, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_add_leaky_relu_7(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr4, out_ptr5, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp17 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.where(xmask, tmp18, 0) tmp21 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp23 = tl.where(xmask, tmp21, 0) tmp24 = tl.sum(tmp23, 1)[:, None] tmp25 = tmp24 / tmp9 tmp26 = tmp18 - tmp25 tmp27 = tmp26 * tmp26 tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = tl.where(xmask, tmp28, 0) tmp31 = tl.sum(tmp30, 1)[:, None] tmp32 = tmp0 - tmp10 tmp33 = 64.0 tmp34 = tmp16 / tmp33 tmp35 = 1e-05 tmp36 = tmp34 + tmp35 tmp37 = libdevice.rsqrt(tmp36) tmp38 = tmp32 * tmp37 tmp39 = tmp17 - tmp25 tmp40 = tmp31 / tmp33 tmp41 = tmp40 + tmp35 tmp42 = libdevice.rsqrt(tmp41) tmp43 = tmp39 * tmp42 tmp44 = tmp38 + tmp43 tmp45 = 0.0 tmp46 = tmp44 > tmp45 tmp47 = 0.02 tmp48 = tmp44 * tmp47 tmp49 = tl.where(tmp46, tmp44, tmp48) tl.store(in_out_ptr0 + (r1 + (64*x0)), tmp49, xmask) tl.store(out_ptr4 + (x0), tmp37, xmask) tl.store(out_ptr5 + (x0), tmp42, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) tl.store(out_ptr2 + (x0), tmp25, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/pi/cpipt5xkfdzvmjxazx3n5uofjuwjrzf5ij4bjmhavtruo2qzada5.py # Topologically Sorted Source Nodes: [output_27, output_28, output_29], Original ATen: [aten._native_batch_norm_legit, aten.add, aten.leaky_relu] # Source node to ATen node mapping: # output_27 => add_19, rsqrt_14, var_mean_14 # output_28 => add_20 # output_29 => gt_12, mul_27, where_12 # Graph fragment: # %var_mean_14 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_49, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_19 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_28, 1e-05), kwargs = {}) # %rsqrt_14 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_19,), kwargs = {}) # %add_20 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_50, %where_10), 
kwargs = {}) # %gt_12 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_20, 0), kwargs = {}) # %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_20, 0.02), kwargs = {}) # %where_12 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_12, %add_20, %mul_27), kwargs = {}) triton_per_fused__native_batch_norm_legit_add_leaky_relu_8 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_leaky_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_leaky_relu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_add_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 64.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 + tmp24 tmp26 = 0.0 tmp27 = tmp25 > tmp26 tmp28 = 0.02 tmp29 = tmp25 * tmp28 tmp30 = tl.where(tmp27, tmp25, tmp29) tl.store(out_ptr2 + (r1 + (64*x0)), tmp30, xmask) tl.store(out_ptr3 + (x0), tmp22, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/na/cnaqmtycnktmhsst3mp62ijgphq4jynpdh5rrx53jwt4xrpguiak.py # 
Topologically Sorted Source Nodes: [layer_9, layer_10], Original ATen: [aten._native_batch_norm_legit, aten.leaky_relu] # Source node to ATen node mapping: # layer_10 => gt_13, mul_29, where_13 # layer_9 => add_21, rsqrt_15, var_mean_15 # Graph fragment: # %var_mean_15 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_51, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_21 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_30, 1e-05), kwargs = {}) # %rsqrt_15 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_21,), kwargs = {}) # %gt_13 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_52, 0), kwargs = {}) # %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_52, 0.02), kwargs = {}) # %where_13 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_13, %view_52, %mul_29), kwargs = {}) triton_per_fused__native_batch_norm_legit_leaky_relu_9 = async_compile.triton('triton_per_fused__native_batch_norm_legit_leaky_relu_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_leaky_relu_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_leaky_relu_9(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 
tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp24 = 0.0 tmp25 = tmp23 > tmp24 tmp26 = 0.02 tmp27 = tmp23 * tmp26 tmp28 = tl.where(tmp25, tmp23, tmp27) tl.store(out_ptr2 + (r1 + (16*x0)), tmp28, xmask) tl.store(out_ptr3 + (x0), tmp22, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/c3/cc33ghyzlac4immomxzyjr2a5ytrc6l35tkp35qwwkt7rpfs44x7.py # Topologically Sorted Source Nodes: [layer_11], Original ATen: [aten.convolution] # Source node to ATen node mapping: # layer_11 => convolution_16 # Graph fragment: # %convolution_16 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_54, %primals_18, %primals_19, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_10 = async_compile.triton('triton_poi_fused_convolution_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19 = args args.clear() assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_2, (4, 1, 7, 7), (49, 49, 7, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (8, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_8, (8, 8, 
3, 3), (72, 9, 3, 1)) assert_size_stride(primals_9, (8, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_10, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_11, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_12, (16, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_13, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_14, (16, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_15, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_16, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_17, (16, 16, 4, 4), (256, 16, 4, 1)) assert_size_stride(primals_18, (4, 16, 4, 4), (256, 16, 4, 1)) assert_size_stride(primals_19, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [sub], Original ATen: [aten.rsub] stream0 = get_raw_stream(0) triton_poi_fused_rsub_0.run(primals_1, buf0, 16384, grid=grid(16384), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2, 2), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 32, 32), (4096, 1024, 32, 1)) buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf6 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32) buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [layer_0, layer_1], Original ATen: [aten._native_batch_norm_legit, aten.leaky_relu] triton_per_fused__native_batch_norm_legit_leaky_relu_1.run(buf1, buf2, buf6, buf5, 16, 1024, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(buf6, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 32, 32), (4096, 1024, 32, 1)) buf8 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf12 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32) buf11 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten._native_batch_norm_legit, aten.leaky_relu] triton_per_fused__native_batch_norm_legit_leaky_relu_1.run(buf7, buf8, buf12, buf11, 16, 1024, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf13 = extern_kernels.convolution(buf12, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 4, 32, 32), (4096, 1024, 32, 1)) buf14 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf18 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32) buf17 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [output_2, output_3, output_4], Original ATen: [aten._native_batch_norm_legit, aten.add, aten.leaky_relu] triton_per_fused__native_batch_norm_legit_add_leaky_relu_2.run(buf13, buf6, buf14, buf18, buf17, 16, 1024, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf19 = extern_kernels.convolution(buf18, primals_5, stride=(1, 1), padding=(1, 1), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 4, 32, 32), (4096, 1024, 32, 1)) buf20 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf24 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32) buf23 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [output_5, output_6], Original ATen: [aten._native_batch_norm_legit, aten.leaky_relu] triton_per_fused__native_batch_norm_legit_leaky_relu_1.run(buf19, buf20, buf24, buf23, 16, 1024, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf25 = extern_kernels.convolution(buf24, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf25, (4, 4, 32, 32), (4096, 1024, 32, 1)) buf26 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf30 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32) buf29 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [output_7, output_8, output_9], Original ATen: [aten._native_batch_norm_legit, aten.add, aten.leaky_relu] triton_per_fused__native_batch_norm_legit_add_leaky_relu_2.run(buf25, buf18, buf26, buf30, buf29, 16, 1024, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf31 = extern_kernels.convolution(buf30, primals_7, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf31, (4, 8, 16, 16), (2048, 256, 16, 1)) buf32 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32) buf36 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1), torch.float32) buf35 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32) # Topologically Sorted Source Nodes: [output_10, output_11], Original ATen: [aten._native_batch_norm_legit, aten.leaky_relu] triton_per_fused__native_batch_norm_legit_leaky_relu_3.run(buf31, buf32, buf36, buf35, 32, 256, grid=grid(32), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf37 = extern_kernels.convolution(buf36, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf37, (4, 8, 16, 16), (2048, 256, 16, 1)) # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution] buf42 = extern_kernels.convolution(buf30, primals_9, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 8, 16, 16), (2048, 256, 16, 1)) buf38 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32) buf43 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32) buf47 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1), torch.float32) buf48 = buf47; del buf47 # reuse buf41 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32) buf46 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32) # Topologically Sorted Source Nodes: [output_12, input_, output_13, output_14], Original ATen: [aten._native_batch_norm_legit, aten.add, aten.leaky_relu] triton_per_fused__native_batch_norm_legit_add_leaky_relu_4.run(buf48, buf37, buf42, buf38, buf43, buf41, buf46, 32, 256, grid=grid(32), stream=stream0) # 
Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution] buf49 = extern_kernels.convolution(buf48, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf49, (4, 8, 16, 16), (2048, 256, 16, 1)) buf50 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32) buf54 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1), torch.float32) buf53 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32) # Topologically Sorted Source Nodes: [output_15, output_16], Original ATen: [aten._native_batch_norm_legit, aten.leaky_relu] triton_per_fused__native_batch_norm_legit_leaky_relu_3.run(buf49, buf50, buf54, buf53, 32, 256, grid=grid(32), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution] buf55 = extern_kernels.convolution(buf54, primals_11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf55, (4, 8, 16, 16), (2048, 256, 16, 1)) buf56 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32) buf60 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1), torch.float32) buf59 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch.float32) # Topologically Sorted Source Nodes: [output_17, output_18, output_19], Original ATen: [aten._native_batch_norm_legit, aten.add, aten.leaky_relu] triton_per_fused__native_batch_norm_legit_add_leaky_relu_5.run(buf55, buf48, buf56, buf60, buf59, 32, 256, grid=grid(32), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution] buf61 = extern_kernels.convolution(buf60, primals_12, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf61, (4, 16, 8, 8), (1024, 64, 8, 1)) buf62 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32) buf66 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.float32) buf65 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32) # Topologically Sorted Source Nodes: [output_20, output_21], Original ATen: [aten._native_batch_norm_legit, aten.leaky_relu] triton_per_fused__native_batch_norm_legit_leaky_relu_6.run(buf61, buf62, buf66, buf65, 64, 64, grid=grid(64), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution] buf67 = extern_kernels.convolution(buf66, primals_13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf67, (4, 16, 8, 8), (1024, 64, 8, 1)) # Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution] buf72 = extern_kernels.convolution(buf60, primals_14, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf72, (4, 16, 8, 8), (1024, 64, 8, 1)) buf68 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32) buf73 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32) buf77 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.float32) buf78 = buf77; del buf77 # reuse buf71 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32) buf76 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32) # Topologically Sorted Source Nodes: [output_22, input__1, output_23, output_24], Original ATen: [aten._native_batch_norm_legit, aten.add, 
aten.leaky_relu] triton_per_fused__native_batch_norm_legit_add_leaky_relu_7.run(buf78, buf67, buf72, buf68, buf73, buf71, buf76, 64, 64, grid=grid(64), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution] buf79 = extern_kernels.convolution(buf78, primals_15, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf79, (4, 16, 8, 8), (1024, 64, 8, 1)) buf80 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32) buf84 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.float32) buf83 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32) # Topologically Sorted Source Nodes: [output_25, output_26], Original ATen: [aten._native_batch_norm_legit, aten.leaky_relu] triton_per_fused__native_batch_norm_legit_leaky_relu_6.run(buf79, buf80, buf84, buf83, 64, 64, grid=grid(64), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution] buf85 = extern_kernels.convolution(buf84, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf85, (4, 16, 8, 8), (1024, 64, 8, 1)) buf86 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32) buf90 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.float32) buf89 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32) # Topologically Sorted Source Nodes: [output_27, output_28, output_29], Original ATen: [aten._native_batch_norm_legit, aten.add, aten.leaky_relu] triton_per_fused__native_batch_norm_legit_add_leaky_relu_8.run(buf85, buf78, buf86, buf90, buf89, 64, 64, grid=grid(64), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_15], Original ATen: [aten.convolution] buf91 = extern_kernels.convolution(buf90, primals_17, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf91, (4, 16, 4, 4), (256, 16, 4, 1)) buf92 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32) buf96 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32) buf95 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32) # Topologically Sorted Source Nodes: [layer_9, layer_10], Original ATen: [aten._native_batch_norm_legit, aten.leaky_relu] triton_per_fused__native_batch_norm_legit_leaky_relu_9.run(buf91, buf92, buf96, buf95, 64, 16, grid=grid(64), stream=stream0) # Topologically Sorted Source Nodes: [layer_11], Original ATen: [aten.convolution] buf97 = extern_kernels.convolution(buf96, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf97, (4, 4, 1, 1), (4, 1, 1, 1)) buf98 = reinterpret_tensor(buf97, (4, 4, 1, 1), (4, 1, 16, 16), 0); del buf97 # reuse # Topologically Sorted Source Nodes: [layer_11], Original ATen: [aten.convolution] triton_poi_fused_convolution_10.run(buf98, primals_19, 16, grid=grid(16), stream=stream0) del primals_19 return (reinterpret_tensor(buf98, (16, ), (1, ), 0), primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, buf0, buf1, reinterpret_tensor(buf5, (16, ), (1, ), 0), buf6, buf7, reinterpret_tensor(buf11, (16, ), (1, ), 0), buf12, buf13, reinterpret_tensor(buf17, (16, ), (1, ), 0), buf18, buf19, 
reinterpret_tensor(buf23, (16, ), (1, ), 0), buf24, buf25, reinterpret_tensor(buf29, (16, ), (1, ), 0), buf30, buf31, reinterpret_tensor(buf35, (32, ), (1, ), 0), buf36, buf37, reinterpret_tensor(buf41, (32, ), (1, ), 0), buf42, reinterpret_tensor(buf46, (32, ), (1, ), 0), buf48, buf49, reinterpret_tensor(buf53, (32, ), (1, ), 0), buf54, buf55, reinterpret_tensor(buf59, (32, ), (1, ), 0), buf60, buf61, reinterpret_tensor(buf65, (64, ), (1, ), 0), buf66, buf67, reinterpret_tensor(buf71, (64, ), (1, ), 0), buf72, reinterpret_tensor(buf76, (64, ), (1, ), 0), buf78, buf79, reinterpret_tensor(buf83, (64, ), (1, ), 0), buf84, buf85, reinterpret_tensor(buf89, (64, ), (1, ), 0), buf90, buf91, reinterpret_tensor(buf95, (64, ), (1, ), 0), buf96, reinterpret_tensor(buf92, (1, 64, 1, 1), (64, 1, 1, 1), 0), reinterpret_tensor(buf86, (1, 64, 1, 1), (64, 1, 1, 1), 0), reinterpret_tensor(buf80, (1, 64, 1, 1), (64, 1, 1, 1), 0), reinterpret_tensor(buf73, (1, 64, 1, 1), (64, 1, 1, 1), 0), reinterpret_tensor(buf68, (1, 64, 1, 1), (64, 1, 1, 1), 0), reinterpret_tensor(buf62, (1, 64, 1, 1), (64, 1, 1, 1), 0), reinterpret_tensor(buf56, (1, 32, 1, 1), (32, 1, 1, 1), 0), reinterpret_tensor(buf50, (1, 32, 1, 1), (32, 1, 1, 1), 0), reinterpret_tensor(buf43, (1, 32, 1, 1), (32, 1, 1, 1), 0), reinterpret_tensor(buf38, (1, 32, 1, 1), (32, 1, 1, 1), 0), reinterpret_tensor(buf32, (1, 32, 1, 1), (32, 1, 1, 1), 0), reinterpret_tensor(buf26, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf20, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf14, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf8, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 7, 7), (49, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((8, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((8, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((16, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((16, 8, 1, 1), (8, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((16, 16, 4, 4), (256, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, 16, 4, 4), (256, 16, 4, 1), device='cuda:0', dtype=torch.float32) 
primals_19 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
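# --- Added reference sketch (assumption: not part of the Inductor output) ---
# Each triton_per_fused__native_batch_norm_legit*leaky_relu* kernel above
# fuses a per-(sample, channel) instance normalization (biased variance,
# eps=1e-05) with LeakyReLU(negative_slope=0.02), sometimes plus a residual
# add. A minimal eager-mode equivalent for sanity-checking those kernels,
# assuming a contiguous float32 input x of shape (N, C, H, W):
def _instance_norm_leaky_relu_reference(x, eps=1e-05, negative_slope=0.02):
    # Statistics over the spatial dims only, matching
    # var_mean(..., [0, 2, 3], correction=0) on the (1, N*C, H, W) view.
    mean = x.mean(dim=(2, 3), keepdim=True)
    var = x.var(dim=(2, 3), unbiased=False, keepdim=True)
    y = (x - mean) * torch.rsqrt(var + eps)
    return torch.where(y > 0, y, y * negative_slope)
# Example: _instance_norm_leaky_relu_reference(torch.randn(4, 8, 16, 16))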
import torch import torch.nn as nn import torch.nn.functional as F class resnet_block(nn.Module): def __init__(self, dim_in, dim_out): super(resnet_block, self).__init__() self.dim_in = dim_in self.dim_out = dim_out if self.dim_in == self.dim_out: self.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=1, padding=1, bias=False) self.bn_1 = nn.InstanceNorm2d(self.dim_out) self.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1, padding=1, bias=False) self.bn_2 = nn.InstanceNorm2d(self.dim_out) else: self.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=2, padding=1, bias=False) self.bn_1 = nn.InstanceNorm2d(self.dim_out) self.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1, padding=1, bias=False) self.bn_2 = nn.InstanceNorm2d(self.dim_out) self.conv_s = nn.Conv2d(self.dim_in, self.dim_out, 1, stride=2, padding=0, bias=False) self.bn_s = nn.InstanceNorm2d(self.dim_out) def forward(self, input): if self.dim_in == self.dim_out: output = self.bn_1(self.conv_1(input)) output = F.leaky_relu(output, negative_slope=0.02, inplace=True) output = self.bn_2(self.conv_2(output)) output = output + input output = F.leaky_relu(output, negative_slope=0.02, inplace=True) else: output = self.bn_1(self.conv_1(input)) output = F.leaky_relu(output, negative_slope=0.02, inplace=True) output = self.bn_2(self.conv_2(output)) input_ = self.bn_s(self.conv_s(input)) output = output + input_ output = F.leaky_relu(output, negative_slope=0.02, inplace=True) return output class img_encoder(nn.Module): def __init__(self, img_ef_dim, z_dim): super(img_encoder, self).__init__() self.img_ef_dim = img_ef_dim self.z_dim = z_dim self.conv_0 = nn.Conv2d(1, self.img_ef_dim, 7, stride=2, padding=3, bias=False) self.bn_0 = nn.InstanceNorm2d(self.img_ef_dim) self.res_1 = resnet_block(self.img_ef_dim, self.img_ef_dim) self.res_2 = resnet_block(self.img_ef_dim, self.img_ef_dim) self.res_3 = resnet_block(self.img_ef_dim, self.img_ef_dim * 2) self.res_4 = resnet_block(self.img_ef_dim * 2, self.img_ef_dim * 2) self.res_5 = resnet_block(self.img_ef_dim * 2, self.img_ef_dim * 4) self.res_6 = resnet_block(self.img_ef_dim * 4, self.img_ef_dim * 4) self.conv_9 = nn.Conv2d(self.img_ef_dim * 4, self.img_ef_dim * 4, 4, stride=2, padding=1, bias=False) self.bn_9 = nn.InstanceNorm2d(self.img_ef_dim * 4) self.conv_10 = nn.Conv2d(self.img_ef_dim * 4, self.z_dim, 4, stride =1, padding=0, bias=True) def forward(self, view): layer_0 = self.bn_0(self.conv_0(1 - view)) layer_0 = F.leaky_relu(layer_0, negative_slope=0.02, inplace=True) layer_1 = self.res_1(layer_0) layer_2 = self.res_2(layer_1) layer_3 = self.res_3(layer_2) layer_4 = self.res_4(layer_3) layer_5 = self.res_5(layer_4) layer_6 = self.res_6(layer_5) layer_9 = self.bn_9(self.conv_9(layer_6)) layer_9 = F.leaky_relu(layer_9, negative_slope=0.02, inplace=True) layer_10 = self.conv_10(layer_9) layer_10 = layer_10.view(-1) return layer_10 def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {'img_ef_dim': 4, 'z_dim': 4}]
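# Usage sketch (assumption: this helper is not part of the original module;
# the sizes mirror get_inputs()/get_init_inputs() above):
def _example_forward():
    # The 64x64 input is halved at conv_0, res_3, res_5, and conv_9
    # (stride 2 each) down to 4x4; conv_10 then reduces to z_dim channels
    # at 1x1 and view(-1) flattens, so 4 samples * z_dim=4 -> 16 values.
    encoder = img_encoder(img_ef_dim=4, z_dim=4)
    z = encoder(torch.rand(4, 1, 64, 64))
    return z  # shape: torch.Size([16])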
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_rsub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tl.store(out_ptr0 + x0, tmp2, None) @triton.jit def triton_per_fused__native_batch_norm_legit_leaky_relu_1(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = tmp0 - tmp8 tmp15 = 1024.0 tmp16 = tmp13 / tmp15 tmp17 = 1e-05 tmp18 = tmp16 + tmp17 tmp19 = libdevice.rsqrt(tmp18) tmp20 = tmp14 * tmp19 tmp21 = 0.0 tmp22 = tmp20 > tmp21 tmp23 = 0.02 tmp24 = tmp20 * tmp23 tmp25 = tl.where(tmp22, tmp20, tmp24) tl.store(out_ptr2 + (r1 + 1024 * x0), tmp25, None) tl.store(out_ptr3 + x0, tmp19, None) tl.store(out_ptr0 + x0, tmp8, None) @triton.jit def triton_per_fused__native_batch_norm_legit_add_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp21 = tl.load(in_ptr1 + (r1 + 1024 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = tmp0 - tmp8 tmp15 = 1024.0 tmp16 = tmp13 / tmp15 tmp17 = 1e-05 tmp18 = tmp16 + tmp17 tmp19 = libdevice.rsqrt(tmp18) tmp20 = tmp14 * tmp19 tmp22 = tmp20 + tmp21 tmp23 = 0.0 tmp24 = tmp22 > tmp23 tmp25 = 0.02 tmp26 = tmp22 * tmp25 tmp27 = tl.where(tmp24, tmp22, tmp26) tl.store(out_ptr2 + (r1 + 1024 * x0), tmp27, None) tl.store(out_ptr3 + x0, tmp19, None) tl.store(out_ptr0 + x0, tmp8, None) @triton.jit def triton_per_fused__native_batch_norm_legit_leaky_relu_3(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = 
tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = tmp0 - tmp8 tmp15 = 256.0 tmp16 = tmp13 / tmp15 tmp17 = 1e-05 tmp18 = tmp16 + tmp17 tmp19 = libdevice.rsqrt(tmp18) tmp20 = tmp14 * tmp19 tmp21 = 0.0 tmp22 = tmp20 > tmp21 tmp23 = 0.02 tmp24 = tmp20 * tmp23 tmp25 = tl.where(tmp22, tmp20, tmp24) tl.store(out_ptr2 + (r1 + 256 * x0), tmp25, None) tl.store(out_ptr3 + x0, tmp19, None) tl.store(out_ptr0 + x0, tmp8, None) @triton.jit def triton_per_fused__native_batch_norm_legit_add_leaky_relu_4(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr4, out_ptr5, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None) tmp14 = tl.load(in_ptr1 + (r1 + 256 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp17 = tl.broadcast_to(tmp15, [RBLOCK]) tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tmp20 = tmp19 / tmp7 tmp21 = tmp15 - tmp20 tmp22 = tmp21 * tmp21 tmp23 = tl.broadcast_to(tmp22, [RBLOCK]) tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0)) tmp26 = tmp0 - tmp8 tmp27 = 256.0 tmp28 = tmp13 / tmp27 tmp29 = 1e-05 tmp30 = tmp28 + tmp29 tmp31 = libdevice.rsqrt(tmp30) tmp32 = tmp26 * tmp31 tmp33 = tmp14 - tmp20 tmp34 = tmp25 / tmp27 tmp35 = tmp34 + tmp29 tmp36 = libdevice.rsqrt(tmp35) tmp37 = tmp33 * tmp36 tmp38 = tmp32 + tmp37 tmp39 = 0.0 tmp40 = tmp38 > tmp39 tmp41 = 0.02 tmp42 = tmp38 * tmp41 tmp43 = tl.where(tmp40, tmp38, tmp42) tl.store(in_out_ptr0 + (r1 + 256 * x0), tmp43, None) tl.store(out_ptr4 + x0, tmp31, None) tl.store(out_ptr5 + x0, tmp36, None) tl.store(out_ptr0 + x0, tmp8, None) tl.store(out_ptr2 + x0, tmp20, None) @triton.jit def triton_per_fused__native_batch_norm_legit_add_leaky_relu_5(in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None) tmp21 = tl.load(in_ptr1 + (r1 + 256 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 
tmp0 - tmp8 tmp15 = 256.0 tmp16 = tmp13 / tmp15 tmp17 = 1e-05 tmp18 = tmp16 + tmp17 tmp19 = libdevice.rsqrt(tmp18) tmp20 = tmp14 * tmp19 tmp22 = tmp20 + tmp21 tmp23 = 0.0 tmp24 = tmp22 > tmp23 tmp25 = 0.02 tmp26 = tmp22 * tmp25 tmp27 = tl.where(tmp24, tmp22, tmp26) tl.store(out_ptr2 + (r1 + 256 * x0), tmp27, None) tl.store(out_ptr3 + x0, tmp19, None) tl.store(out_ptr0 + x0, tmp8, None) @triton.jit def triton_per_fused__native_batch_norm_legit_leaky_relu_6(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 64.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp24 = 0.0 tmp25 = tmp23 > tmp24 tmp26 = 0.02 tmp27 = tmp23 * tmp26 tmp28 = tl.where(tmp25, tmp23, tmp27) tl.store(out_ptr2 + (r1 + 64 * x0), tmp28, xmask) tl.store(out_ptr3 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_add_leaky_relu_7(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr4, out_ptr5, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp17 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tl.where(xmask, tmp18, 0) tmp21 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp23 = tl.where(xmask, tmp21, 0) tmp24 = tl.sum(tmp23, 1)[:, None] tmp25 = tmp24 / tmp9 tmp26 = tmp18 - tmp25 tmp27 = tmp26 * tmp26 tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = tl.where(xmask, tmp28, 0) tmp31 = tl.sum(tmp30, 1)[:, None] tmp32 = tmp0 - tmp10 tmp33 = 64.0 tmp34 = tmp16 / tmp33 tmp35 = 1e-05 tmp36 = tmp34 + tmp35 tmp37 = libdevice.rsqrt(tmp36) tmp38 = tmp32 * tmp37 tmp39 = tmp17 - tmp25 tmp40 = tmp31 / tmp33 tmp41 = tmp40 + tmp35 tmp42 = libdevice.rsqrt(tmp41) tmp43 = tmp39 * tmp42 tmp44 = tmp38 + tmp43 tmp45 = 0.0 tmp46 = tmp44 > tmp45 tmp47 = 0.02 tmp48 = tmp44 * tmp47 tmp49 = tl.where(tmp46, tmp44, tmp48) tl.store(in_out_ptr0 + (r1 + 64 * x0), tmp49, xmask) tl.store(out_ptr4 + x0, tmp37, xmask) tl.store(out_ptr5 + x0, tmp42, xmask) 
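# Note (added comment): the stores below persist the per-(n, c) means;
# together with the inverse standard deviations stored above they are
# returned by call() and saved for the backward pass.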
tl.store(out_ptr0 + x0, tmp10, xmask) tl.store(out_ptr2 + x0, tmp25, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_add_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr ): xnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 64.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 + tmp24 tmp26 = 0.0 tmp27 = tmp25 > tmp26 tmp28 = 0.02 tmp29 = tmp25 * tmp28 tmp30 = tl.where(tmp27, tmp25, tmp29) tl.store(out_ptr2 + (r1 + 64 * x0), tmp30, xmask) tl.store(out_ptr3 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_leaky_relu_9(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp24 = 0.0 tmp25 = tmp23 > tmp24 tmp26 = 0.02 tmp27 = tmp23 * tmp26 tmp28 = tl.where(tmp25, tmp23, tmp27) tl.store(out_ptr2 + (r1 + 16 * x0), tmp28, xmask) tl.store(out_ptr3 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19) = args args.clear() assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_2, (4, 1, 7, 7), (49, 49, 7, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 
3, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (8, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_8, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_9, (8, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_10, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_11, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_12, (16, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_13, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_14, (16, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_15, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_16, (16, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_17, (16, 16, 4, 4), (256, 16, 4, 1)) assert_size_stride(primals_18, (4, 16, 4, 4), (256, 16, 4, 1)) assert_size_stride(primals_19, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.float32) get_raw_stream(0) triton_poi_fused_rsub_0[grid(16384)](primals_1, buf0, 16384, XBLOCK =128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2, 2), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 32, 32), (4096, 1024, 32, 1)) buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf6 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32) buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) triton_per_fused__native_batch_norm_legit_leaky_relu_1[grid(16)](buf1, buf2, buf6, buf5, 16, 1024, num_warps=8, num_stages=1) buf7 = extern_kernels.convolution(buf6, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 32, 32), (4096, 1024, 32, 1)) buf8 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf12 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32) buf11 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) triton_per_fused__native_batch_norm_legit_leaky_relu_1[grid(16)](buf7, buf8, buf12, buf11, 16, 1024, num_warps=8, num_stages=1) buf13 = extern_kernels.convolution(buf12, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 4, 32, 32), (4096, 1024, 32, 1)) buf14 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) buf18 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32) buf17 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) triton_per_fused__native_batch_norm_legit_add_leaky_relu_2[grid(16)]( buf13, buf6, buf14, buf18, buf17, 16, 1024, num_warps=8, num_stages=1) buf19 = extern_kernels.convolution(buf18, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 4, 32, 32), (4096, 1024, 32, 1)) buf20 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) buf24 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32) buf23 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. 
float32) triton_per_fused__native_batch_norm_legit_leaky_relu_1[grid(16)](buf19, buf20, buf24, buf23, 16, 1024, num_warps=8, num_stages=1) buf25 = extern_kernels.convolution(buf24, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf25, (4, 4, 32, 32), (4096, 1024, 32, 1)) buf26 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) buf30 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32) buf29 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) triton_per_fused__native_batch_norm_legit_add_leaky_relu_2[grid(16)]( buf25, buf18, buf26, buf30, buf29, 16, 1024, num_warps=8, num_stages=1) buf31 = extern_kernels.convolution(buf30, primals_7, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf31, (4, 8, 16, 16), (2048, 256, 16, 1)) buf32 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch. float32) buf36 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1), torch.float32) buf35 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch. float32) triton_per_fused__native_batch_norm_legit_leaky_relu_3[grid(32)](buf31, buf32, buf36, buf35, 32, 256, num_warps=2, num_stages=1) buf37 = extern_kernels.convolution(buf36, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf37, (4, 8, 16, 16), (2048, 256, 16, 1)) buf42 = extern_kernels.convolution(buf30, primals_9, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 8, 16, 16), (2048, 256, 16, 1)) buf38 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch. float32) buf43 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch. float32) buf47 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1), torch.float32) buf48 = buf47 del buf47 buf41 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch. float32) buf46 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch. float32) triton_per_fused__native_batch_norm_legit_add_leaky_relu_4[grid(32)]( buf48, buf37, buf42, buf38, buf43, buf41, buf46, 32, 256, num_warps=2, num_stages=1) buf49 = extern_kernels.convolution(buf48, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf49, (4, 8, 16, 16), (2048, 256, 16, 1)) buf50 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch. float32) buf54 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1), torch.float32) buf53 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch. float32) triton_per_fused__native_batch_norm_legit_leaky_relu_3[grid(32)](buf49, buf50, buf54, buf53, 32, 256, num_warps=2, num_stages=1) buf55 = extern_kernels.convolution(buf54, primals_11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf55, (4, 8, 16, 16), (2048, 256, 16, 1)) buf56 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch. float32) buf60 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1), torch.float32) buf59 = empty_strided_cuda((1, 32, 1, 1), (32, 1, 32, 32), torch. 
float32) triton_per_fused__native_batch_norm_legit_add_leaky_relu_5[grid(32)]( buf55, buf48, buf56, buf60, buf59, 32, 256, num_warps=2, num_stages=1) buf61 = extern_kernels.convolution(buf60, primals_12, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf61, (4, 16, 8, 8), (1024, 64, 8, 1)) buf62 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch. float32) buf66 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch. float32) buf65 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch. float32) triton_per_fused__native_batch_norm_legit_leaky_relu_6[grid(64)](buf61, buf62, buf66, buf65, 64, 64, XBLOCK=32, num_warps=8, num_stages=1) buf67 = extern_kernels.convolution(buf66, primals_13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf67, (4, 16, 8, 8), (1024, 64, 8, 1)) buf72 = extern_kernels.convolution(buf60, primals_14, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf72, (4, 16, 8, 8), (1024, 64, 8, 1)) buf68 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch. float32) buf73 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch. float32) buf77 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch. float32) buf78 = buf77 del buf77 buf71 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch. float32) buf76 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch. float32) triton_per_fused__native_batch_norm_legit_add_leaky_relu_7[grid(64)]( buf78, buf67, buf72, buf68, buf73, buf71, buf76, 64, 64, XBLOCK =8, num_warps=4, num_stages=1) buf79 = extern_kernels.convolution(buf78, primals_15, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf79, (4, 16, 8, 8), (1024, 64, 8, 1)) buf80 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch. float32) buf84 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch. float32) buf83 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch. float32) triton_per_fused__native_batch_norm_legit_leaky_relu_6[grid(64)](buf79, buf80, buf84, buf83, 64, 64, XBLOCK=32, num_warps=8, num_stages=1) buf85 = extern_kernels.convolution(buf84, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf85, (4, 16, 8, 8), (1024, 64, 8, 1)) buf86 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch. float32) buf90 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch. float32) buf89 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch. float32) triton_per_fused__native_batch_norm_legit_add_leaky_relu_8[grid(64)]( buf85, buf78, buf86, buf90, buf89, 64, 64, XBLOCK=32, num_warps =8, num_stages=1) buf91 = extern_kernels.convolution(buf90, primals_17, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf91, (4, 16, 4, 4), (256, 16, 4, 1)) buf92 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch. float32) buf96 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch. float32) buf95 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch. 
float32) triton_per_fused__native_batch_norm_legit_leaky_relu_9[grid(64)](buf91, buf92, buf96, buf95, 64, 16, XBLOCK=8, num_warps=2, num_stages=1) buf97 = extern_kernels.convolution(buf96, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf97, (4, 4, 1, 1), (4, 1, 1, 1)) buf98 = reinterpret_tensor(buf97, (4, 4, 1, 1), (4, 1, 16, 16), 0) del buf97 triton_poi_fused_convolution_10[grid(16)](buf98, primals_19, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_19 return (reinterpret_tensor(buf98, (16,), (1,), 0), primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, buf0, buf1, reinterpret_tensor(buf5, (16,), (1,), 0), buf6, buf7, reinterpret_tensor(buf11, (16,), (1,), 0), buf12, buf13, reinterpret_tensor(buf17, (16,), (1,), 0), buf18, buf19, reinterpret_tensor(buf23, (16,), (1,), 0), buf24, buf25, reinterpret_tensor(buf29, (16,), (1,), 0), buf30, buf31, reinterpret_tensor(buf35, (32,), (1,), 0), buf36, buf37, reinterpret_tensor(buf41, (32,), (1,), 0), buf42, reinterpret_tensor(buf46, (32,), (1,), 0), buf48, buf49, reinterpret_tensor(buf53, (32,), (1,), 0), buf54, buf55, reinterpret_tensor(buf59, (32,), (1,), 0), buf60, buf61, reinterpret_tensor(buf65, (64,), (1,), 0), buf66, buf67, reinterpret_tensor(buf71, (64,), (1,), 0), buf72, reinterpret_tensor(buf76, (64,), (1,), 0), buf78, buf79, reinterpret_tensor(buf83, (64,), (1,), 0), buf84, buf85, reinterpret_tensor(buf89, (64,), (1,), 0), buf90, buf91, reinterpret_tensor(buf95, (64,), (1,), 0), buf96, reinterpret_tensor(buf92, (1, 64, 1, 1), (64, 1, 1, 1), 0), reinterpret_tensor(buf86, (1, 64, 1, 1), (64, 1, 1, 1), 0), reinterpret_tensor(buf80, (1, 64, 1, 1), (64, 1, 1, 1), 0), reinterpret_tensor(buf73, (1, 64, 1, 1), (64, 1, 1, 1), 0), reinterpret_tensor(buf68, (1, 64, 1, 1), (64, 1, 1, 1), 0), reinterpret_tensor(buf62, (1, 64, 1, 1), (64, 1, 1, 1), 0), reinterpret_tensor(buf56, (1, 32, 1, 1), (32, 1, 1, 1), 0), reinterpret_tensor(buf50, (1, 32, 1, 1), (32, 1, 1, 1), 0), reinterpret_tensor(buf43, (1, 32, 1, 1), (32, 1, 1, 1), 0), reinterpret_tensor(buf38, (1, 32, 1, 1), (32, 1, 1, 1), 0), reinterpret_tensor(buf32, (1, 32, 1, 1), (32, 1, 1, 1), 0), reinterpret_tensor(buf26, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf20, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf14, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf8, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0)) class resnet_block(nn.Module): def __init__(self, dim_in, dim_out): super(resnet_block, self).__init__() self.dim_in = dim_in self.dim_out = dim_out if self.dim_in == self.dim_out: self.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=1, padding=1, bias=False) self.bn_1 = nn.InstanceNorm2d(self.dim_out) self.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1, padding=1, bias=False) self.bn_2 = nn.InstanceNorm2d(self.dim_out) else: self.conv_1 = nn.Conv2d(self.dim_in, self.dim_out, 3, stride=2, padding=1, bias=False) self.bn_1 = nn.InstanceNorm2d(self.dim_out) self.conv_2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1, padding=1, bias=False) self.bn_2 = nn.InstanceNorm2d(self.dim_out) self.conv_s = nn.Conv2d(self.dim_in, self.dim_out, 1, stride=2, padding=0, bias=False) self.bn_s = nn.InstanceNorm2d(self.dim_out) def forward(self, input): if self.dim_in == 
self.dim_out: output = self.bn_1(self.conv_1(input)) output = F.leaky_relu(output, negative_slope=0.02, inplace=True) output = self.bn_2(self.conv_2(output)) output = output + input output = F.leaky_relu(output, negative_slope=0.02, inplace=True) else: output = self.bn_1(self.conv_1(input)) output = F.leaky_relu(output, negative_slope=0.02, inplace=True) output = self.bn_2(self.conv_2(output)) input_ = self.bn_s(self.conv_s(input)) output = output + input_ output = F.leaky_relu(output, negative_slope=0.02, inplace=True) return output class img_encoderNew(nn.Module): def __init__(self, img_ef_dim, z_dim): super(img_encoderNew, self).__init__() self.img_ef_dim = img_ef_dim self.z_dim = z_dim self.conv_0 = nn.Conv2d(1, self.img_ef_dim, 7, stride=2, padding=3, bias=False) self.bn_0 = nn.InstanceNorm2d(self.img_ef_dim) self.res_1 = resnet_block(self.img_ef_dim, self.img_ef_dim) self.res_2 = resnet_block(self.img_ef_dim, self.img_ef_dim) self.res_3 = resnet_block(self.img_ef_dim, self.img_ef_dim * 2) self.res_4 = resnet_block(self.img_ef_dim * 2, self.img_ef_dim * 2) self.res_5 = resnet_block(self.img_ef_dim * 2, self.img_ef_dim * 4) self.res_6 = resnet_block(self.img_ef_dim * 4, self.img_ef_dim * 4) self.conv_9 = nn.Conv2d(self.img_ef_dim * 4, self.img_ef_dim * 4, 4, stride=2, padding=1, bias=False) self.bn_9 = nn.InstanceNorm2d(self.img_ef_dim * 4) self.conv_10 = nn.Conv2d(self.img_ef_dim * 4, self.z_dim, 4, stride =1, padding=0, bias=True) def forward(self, input_0): primals_2 = self.conv_0.weight primals_3 = self.res_1.conv_1.weight primals_4 = self.res_1.conv_2.weight primals_5 = self.res_2.conv_1.weight primals_6 = self.res_2.conv_2.weight primals_7 = self.res_3.conv_1.weight primals_8 = self.res_3.conv_2.weight primals_9 = self.res_3.conv_s.weight primals_10 = self.res_4.conv_1.weight primals_11 = self.res_4.conv_2.weight primals_12 = self.res_5.conv_1.weight primals_13 = self.res_5.conv_2.weight primals_14 = self.res_5.conv_s.weight primals_15 = self.res_6.conv_1.weight primals_16 = self.res_6.conv_2.weight primals_17 = self.conv_9.weight primals_18 = self.conv_10.weight primals_19 = self.conv_10.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19]) return output[0]
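# A minimal usage sketch for img_encoderNew, assuming a CUDA device. The shapes
# follow the assert_size_stride guards in call(): a batch of 4 single-channel
# 64x64 images with img_ef_dim=4 and z_dim=4; the compiled graph returns the
# latent codes as a flat (16,) tensor (batch 4 x z_dim 4). The function name
# below is illustrative only.
def example_img_encoder_usage():
    encoder = img_encoderNew(img_ef_dim=4, z_dim=4).cuda()
    imgs = torch.rand(4, 1, 64, 64, device='cuda')  # matches the primals_1 guard
    z = encoder(imgs)
    assert z.shape == (16,)  # flat view of the (4, 4, 1, 1) conv_10 output
    return z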
czq142857/DECOR-GAN
img_encoder
false
15133
[ "MIT" ]
55
79c80fc202b8af982989a3e3bb3afe85e606b71f
https://github.com/czq142857/DECOR-GAN/tree/79c80fc202b8af982989a3e3bb3afe85e606b71f
LSTM
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/pc/cpcr3623cktnc2724ttepfbk25u5ng32io6u3pxmpjwbdpi5psaw.py # Topologically Sorted Source Nodes: [h_0], Original ATen: [aten.zero] # Source node to ATen node mapping: # h_0 => full # Graph fragment: # %full : [num_users=3] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_zero_0 = async_compile.triton('triton_poi_fused_zero_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_zero_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_zero_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/zq/czqzkhlox35fklur6ihxj3macd26s7fcwmyoi3mjplrpxqb66tn4.py # Topologically 
Sorted Source Nodes: [sigmoid, mul, sigmoid_1, tanh, mul_1, c_1, sigmoid_2, tanh_1, h_1], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add, aten.sigmoid_backward] # Source node to ATen node mapping: # c_1 => add_1 # h_1 => mul_2 # mul => mul # mul_1 => mul_1 # sigmoid => sigmoid # sigmoid_1 => sigmoid_1 # sigmoid_2 => sigmoid_2 # tanh => tanh # tanh_1 => tanh_1 # Graph fragment: # %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %full), kwargs = {}) # %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_1,), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_3,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %tanh), kwargs = {}) # %add_1 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %sigmoid_2 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_2,), kwargs = {}) # %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {}) # %mul_2 : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_2, %tanh_1), kwargs = {}) # %sub_19 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %mul_73 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub_19), kwargs = {}) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask) tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp7 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask) tmp12 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp13 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask) tmp25 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp26 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = libdevice.tanh(tmp10) tmp14 = tmp12 + tmp13 tmp16 = tmp14 + tmp15 tmp17 = tl.sigmoid(tmp16) tmp18 = 0.0 tmp19 = tmp17 * tmp18 tmp20 = tmp5 * tmp11 tmp21 = tmp19 + tmp20 tmp22 = 1.0 tmp23 = tmp22 - tmp17 tmp24 = tmp17 * tmp23 tmp27 = tmp25 + tmp26 tmp29 = tmp27 + tmp28 tmp30 = tl.sigmoid(tmp29) tmp31 = libdevice.tanh(tmp21) tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + (x2), tmp5, xmask) tl.store(out_ptr1 + (x2), tmp11, xmask) tl.store(out_ptr2 + (x2), tmp21, xmask) tl.store(out_ptr3 + (x2), tmp24, xmask) tl.store(out_ptr4 + (x2), tmp30, xmask) tl.store(out_ptr5 + (x2), tmp32, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/hw/chwjbojajdvf753zel4valmd7qcz6snsa2bu3zhkovdyszf3rzes.py # Topologically Sorted Source Nodes: [sigmoid_3, mul_3, sigmoid_4, tanh_2, mul_4, c_2, sigmoid_5, tanh_3, h_2], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add] # Source node to ATen node mapping: # c_2 => add_3 # h_2 => mul_5 # mul_3 => mul_3 # mul_4 => mul_4 # sigmoid_3 => sigmoid_3 # sigmoid_4 => sigmoid_4 # sigmoid_5 => sigmoid_5 # tanh_2 => tanh_2 # tanh_3 => tanh_3 # Graph fragment: # %sigmoid_3 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_4,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_3, %add_1), kwargs = {}) # %sigmoid_4 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_5,), kwargs = {}) # %tanh_2 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_7,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_4, %tanh_2), kwargs = {}) # %add_3 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_4), kwargs = {}) # %sigmoid_5 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_6,), kwargs = {}) # %tanh_3 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_3,), kwargs = {}) # %mul_5 : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_5, %tanh_3), kwargs = {}) triton_poi_fused_add_mul_sigmoid_tanh_2 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_tanh_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, 
DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 13, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask) tmp6 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask) tmp12 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp13 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask) tmp18 = tl.load(in_ptr3 + (x2), xmask) tmp22 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp23 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = tl.sigmoid(tmp10) tmp14 = tmp12 + tmp13 tmp16 = tmp14 + tmp15 tmp17 = libdevice.tanh(tmp16) tmp19 = tmp5 * tmp18 tmp20 = tmp11 * tmp17 tmp21 = tmp19 + tmp20 tmp24 = tmp22 + tmp23 tmp26 = tmp24 + tmp25 tmp27 = tl.sigmoid(tmp26) tmp28 = libdevice.tanh(tmp21) tmp29 = tmp27 * tmp28 tl.store(out_ptr0 + (x2), tmp5, xmask) tl.store(out_ptr1 + (x2), tmp11, xmask) tl.store(out_ptr2 + (x2), tmp17, xmask) tl.store(out_ptr3 + (x2), tmp21, xmask) tl.store(out_ptr4 + (x2), tmp27, xmask) tl.store(out_ptr5 + (x2), tmp29, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/jl/cjluwms2whl2lvm4jykyhs4ioawu2sx57fya6l5f2y4gvg36hs5h.py # Topologically Sorted Source Nodes: [sigmoid_9, mul_9, sigmoid_10, tanh_6, mul_10, c_4, tanh_7], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add] # Source node to ATen node mapping: # c_4 => add_7 # mul_10 => mul_10 # mul_9 => mul_9 # sigmoid_10 => sigmoid_10 # sigmoid_9 => sigmoid_9 # tanh_6 => tanh_6 # tanh_7 => tanh_7 # Graph fragment: # %sigmoid_9 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_12,), kwargs = {}) # %mul_9 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_9, %add_5), kwargs = {}) # %sigmoid_10 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_13,), kwargs = {}) # %tanh_6 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_15,), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_10, %tanh_6), kwargs = {}) # %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_9, %mul_10), kwargs = {}) # %tanh_7 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_7,), kwargs = {}) triton_poi_fused_add_mul_sigmoid_tanh_3 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_tanh_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_tanh_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_tanh_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask) tmp6 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask) tmp12 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp13 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask) tmp18 = tl.load(in_ptr3 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = tl.sigmoid(tmp10) tmp14 = tmp12 + tmp13 tmp16 = tmp14 + tmp15 tmp17 = libdevice.tanh(tmp16) tmp19 = tmp5 * tmp18 tmp20 = tmp11 * tmp17 tmp21 = tmp19 + tmp20 tmp22 = libdevice.tanh(tmp21) tl.store(out_ptr0 + (x2), tmp5, xmask) tl.store(out_ptr1 + (x2), tmp11, xmask) tl.store(out_ptr2 + (x2), tmp17, 
xmask) tl.store(out_ptr3 + (x2), tmp22, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/tn/ctnk3tco2xzy2lj4un5qf4sk4vennxmerbmibgw3yiebiljtterf.py # Topologically Sorted Source Nodes: [sigmoid_11], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid_11 => sigmoid_11 # Graph fragment: # %sigmoid_11 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_14,), kwargs = {}) triton_poi_fused_sigmoid_4 = async_compile.triton('triton_poi_fused_sigmoid_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tl.store(out_ptr0 + (x2), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mc/cmcgwy75mchsexfjy4ga5bobksauuj2ou2ekxspblctnukxiexex.py # Topologically Sorted Source Nodes: [c_n], Original ATen: [aten.stack] # Source node to ATen node mapping: # c_n => cat_1 # Graph fragment: # %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%add_1, %add_3, %add_5, %add_7],), kwargs = {}) triton_poi_fused_stack_5 = async_compile.triton('triton_poi_fused_stack_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4*x1)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + (4*((-4) + x1))), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + (4*((-8) + x1))), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tl.load(in_ptr3 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0) tmp20 = tl.load(in_ptr2 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0) tmp21 = tmp19 * tmp20 tmp22 = tl.load(in_ptr4 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0) tmp23 = tl.load(in_ptr5 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0) tmp24 = tmp22 * tmp23 tmp25 = tmp21 + tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp16, tmp25, tmp26) tmp28 = tl.where(tmp14, tmp15, tmp27) tmp29 = tl.where(tmp9, tmp10, tmp28) tmp30 = tl.where(tmp4, tmp5, tmp29) tl.store(out_ptr0 + (x2), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7v/c7vzfzfugsasaoi62cwx45au5eeuftkul3wi2lpzxsy6petydqdo.py # Topologically Sorted Source Nodes: [h_n], Original ATen: [aten.stack] # Source node to ATen node mapping: # h_n => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul_2, %mul_5, %mul_8, %mul_11],), kwargs = {}) triton_poi_fused_stack_6 = async_compile.triton('triton_poi_fused_stack_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4*x1)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + (4*((-4) + x1))), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + (4*((-8) + x1))), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tl.load(in_ptr3 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0) tmp20 = tl.load(in_ptr4 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0) tmp21 = tmp19 * tmp20 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp16, tmp21, tmp22) tmp24 = tl.where(tmp14, tmp15, tmp23) tmp25 = tl.where(tmp9, tmp10, tmp24) tmp26 = tl.where(tmp4, tmp5, tmp25) tl.store(out_ptr0 + (x2), tmp26, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 16), (16, 1)) assert_size_stride(primals_4, (4, 16), (16, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_0], Original ATen: [aten.zero] stream0 = get_raw_stream(0) triton_poi_fused_zero_0.run(buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, primals_3, out=buf1) buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 0), primals_4, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf33 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, mul, sigmoid_1, tanh, mul_1, c_1, sigmoid_2, tanh_1, h_1], Original ATen: [aten.sigmoid, 
aten.mul, aten.tanh, aten.add, aten.sigmoid_backward] triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1.run(buf1, primals_2, buf2, buf3, buf4, buf5, buf33, buf6, buf7, 16, grid=grid(16), stream=stream0) buf8 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf7, primals_3, out=buf8) buf9 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [mm_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 4), primals_4, out=buf9) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_3, mul_3, sigmoid_4, tanh_2, mul_4, c_2, sigmoid_5, tanh_3, h_2], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add] triton_poi_fused_add_mul_sigmoid_tanh_2.run(buf8, primals_2, buf9, buf5, buf10, buf11, buf12, buf13, buf14, buf15, 16, grid=grid(16), stream=stream0) buf16 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf15, primals_3, out=buf16) buf17 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [mm_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 8), primals_4, out=buf17) buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_6, mul_6, sigmoid_7, tanh_4, mul_7, c_3, sigmoid_8, tanh_5, h_3], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add] triton_poi_fused_add_mul_sigmoid_tanh_2.run(buf16, primals_2, buf17, buf13, buf18, buf19, buf20, buf21, buf22, buf23, 16, grid=grid(16), stream=stream0) buf24 = buf17; del buf17 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf23, primals_3, out=buf24) buf25 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [mm_3], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 12), primals_4, out=buf25) del primals_4 buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf27 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf28 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_9, mul_9, sigmoid_10, tanh_6, mul_10, c_4, tanh_7], Original ATen: [aten.sigmoid, aten.mul, aten.tanh, aten.add] triton_poi_fused_add_mul_sigmoid_tanh_3.run(buf24, primals_2, buf25, buf21, buf26, buf27, buf28, buf30, 16, grid=grid(16), stream=stream0) buf29 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_11], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_4.run(buf24, primals_2, buf25, buf29, 16, grid=grid(16), stream=stream0) del primals_2 buf31 = reinterpret_tensor(buf25, (16, 4), (4, 1), 0); del buf25 # reuse # Topologically Sorted Source Nodes: [c_n], Original ATen: [aten.stack] triton_poi_fused_stack_5.run(buf5, buf13, buf21, buf26, buf27, buf28, 
buf31, 64, grid=grid(64), stream=stream0) buf32 = reinterpret_tensor(buf24, (16, 4), (4, 1), 0); del buf24 # reuse # Topologically Sorted Source Nodes: [h_n], Original ATen: [aten.stack] triton_poi_fused_stack_6.run(buf7, buf15, buf23, buf29, buf30, buf32, 64, grid=grid(64), stream=stream0) return (reinterpret_tensor(buf32, (4, 4, 4), (4, 16, 1), 0), reinterpret_tensor(buf31, (4, 4, 4), (4, 16, 1), 0), buf0, buf3, buf4, buf5, buf6, buf10, buf11, buf12, buf13, buf14, buf18, buf19, buf20, buf21, buf22, buf26, buf27, buf28, buf29, buf30, reinterpret_tensor(primals_1, (4, 4), (1, 16), 12), reinterpret_tensor(primals_3, (16, 4), (1, 16), 0), reinterpret_tensor(buf23, (4, 4), (1, 4), 0), reinterpret_tensor(primals_1, (4, 4), (1, 16), 8), reinterpret_tensor(buf15, (4, 4), (1, 4), 0), reinterpret_tensor(primals_1, (4, 4), (1, 16), 4), reinterpret_tensor(buf7, (4, 4), (1, 4), 0), buf33, reinterpret_tensor(primals_1, (4, 4), (1, 16), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
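# A minimal smoke-test sketch for the compiled graph, assuming a CUDA device.
# It mirrors benchmark_compiled_module's shapes: primals_1 is the (batch-first)
# input, primals_2 the bias, primals_3 weight_hh and primals_4 weight_ih, per
# the mm/addmm operands in call(). The first two outputs are h_n and c_n, each
# viewed as (4, 4, 4); the remaining outputs are tensors saved for backward.
# The function name below is illustrative only.
def smoke_test_call():
    from torch._dynamo.testing import rand_strided
    args = [
        rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32),
        rand_strided((16,), (1,), device='cuda:0', dtype=torch.float32),
        rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32),
        rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32),
    ]
    h_n, c_n = call(args)[:2]  # note: call() clears the args list in place
    assert h_n.shape == (4, 4, 4) and c_n.shape == (4, 4, 4)
    return h_n, c_n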
import torch
from typing import Tuple
import torch.nn as nn


class LSTM(nn.Module):
    """Implementation of the standard LSTM.

    TODO: Include ref and LaTeX equations

    Parameters
    ----------
    input_size : int
        Number of input features.
    hidden_size : int
        Number of hidden/memory cells.
    batch_first : bool, optional
        If True, expects the batch inputs to be of shape [batch, seq,
        features]; otherwise, the shape has to be [seq, batch, features],
        by default True.
    initial_forget_bias : int, optional
        Value of the initial forget gate bias, by default 0.
    """

    def __init__(self, input_size: 'int', hidden_size: 'int', batch_first:
        'bool'=True, initial_forget_bias: 'int'=0):
        super(LSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.batch_first = batch_first
        self.initial_forget_bias = initial_forget_bias
        self.weight_ih = nn.Parameter(torch.FloatTensor(input_size, 4 *
            hidden_size))
        self.weight_hh = nn.Parameter(torch.FloatTensor(hidden_size, 4 *
            hidden_size))
        self.bias = nn.Parameter(torch.FloatTensor(4 * hidden_size))
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize all learnable parameters of the LSTM."""
        nn.init.orthogonal_(self.weight_ih.data)
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 4)
        self.weight_hh.data = weight_hh_data
        nn.init.constant_(self.bias.data, val=0)
        if self.initial_forget_bias != 0:
            self.bias.data[:self.hidden_size] = self.initial_forget_bias

    def forward(self, x: 'torch.Tensor') -> Tuple[torch.Tensor, torch.Tensor]:
        """Run the LSTM over a batch of input sequences.

        Parameters
        ----------
        x : torch.Tensor
            Tensor containing a batch of input sequences. The shape has to
            match the format specified by the batch_first argument.

        Returns
        -------
        h_n : torch.Tensor
            The hidden states of each time step of each sample in the batch.
        c_n : torch.Tensor
            The cell states of each time step of each sample in the batch.
        """
        if self.batch_first:
            x = x.transpose(0, 1)
        seq_len, batch_size, _ = x.size()
        h_0 = x.data.new(batch_size, self.hidden_size).zero_()
        c_0 = x.data.new(batch_size, self.hidden_size).zero_()
        h_x = h_0, c_0
        h_n, c_n = [], []
        bias_batch = self.bias.unsqueeze(0).expand(batch_size, *self.bias.
            size())
        for t in range(seq_len):
            h_0, c_0 = h_x
            gates = torch.addmm(bias_batch, h_0, self.weight_hh) + torch.mm(
                x[t], self.weight_ih)
            f, i, o, g = gates.chunk(4, 1)
            c_1 = torch.sigmoid(f) * c_0 + torch.sigmoid(i) * torch.tanh(g)
            h_1 = torch.sigmoid(o) * torch.tanh(c_1)
            h_n.append(h_1)
            c_n.append(c_1)
            h_x = h_1, c_1
        h_n = torch.stack(h_n, 0)
        c_n = torch.stack(c_n, 0)
        if self.batch_first:
            h_n = h_n.transpose(0, 1)
            c_n = c_n.transpose(0, 1)
        return h_n, c_n


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4}]
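# The class docstring above leaves "TODO: Include ref and LaTeX equations";
# written out from forward() (gate order f, i, o, g matches gates.chunk(4, 1),
# with W = weight_ih, U = weight_hh, b = bias), the recurrence per time step is:
#
#   [a_f, a_i, a_o, a_g] = h_{t-1} U + x_t W + b
#   c_t = \sigma(a_f) \odot c_{t-1} + \sigma(a_i) \odot \tanh(a_g)
#   h_t = \sigma(a_o) \odot \tanh(c_t)
#
# with h_0 = c_0 = 0, \sigma the logistic sigmoid, and \odot the elementwise
# (Hadamard) product.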
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_zero_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = 0.0
    tl.store(out_ptr0 + x0, tmp0, xmask)


@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1(in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4,
    out_ptr5, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
    tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
    tmp7 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
    tmp12 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
    tmp13 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
    tmp25 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
    tmp26 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl.sigmoid(tmp4)
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 + tmp9
    tmp11 = libdevice.tanh(tmp10)
    tmp14 = tmp12 + tmp13
    tmp16 = tmp14 + tmp15
    tmp17 = tl.sigmoid(tmp16)
    tmp18 = 0.0
    tmp19 = tmp17 * tmp18
    tmp20 = tmp5 * tmp11
    tmp21 = tmp19 + tmp20
    tmp22 = 1.0
    tmp23 = tmp22 - tmp17
    tmp24 = tmp17 * tmp23
    tmp27 = tmp25 + tmp26
    tmp29 = tmp27 + tmp28
    tmp30 = tl.sigmoid(tmp29)
    tmp31 = libdevice.tanh(tmp21)
    tmp32 = tmp30 * tmp31
    tl.store(out_ptr0 + x2, tmp5, xmask)
    tl.store(out_ptr1 + x2, tmp11, xmask)
    tl.store(out_ptr2 + x2, tmp21, xmask)
    tl.store(out_ptr3 + x2, tmp24, xmask)
    tl.store(out_ptr4 + x2, tmp30, xmask)
    tl.store(out_ptr5 + x2, tmp32, xmask)


@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
    tmp6 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
    tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
    tmp12 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
    tmp13 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
    tmp18 = tl.load(in_ptr3 + x2, xmask)
    tmp22 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
    tmp23 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl.sigmoid(tmp4)
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 + tmp9
    tmp11 = tl.sigmoid(tmp10)
    tmp14 = tmp12 + tmp13
    tmp16 = tmp14 + tmp15
    tmp17 = libdevice.tanh(tmp16)
    tmp19 = tmp5 * tmp18
    tmp20 = tmp11 * tmp17
    tmp21 = tmp19 + tmp20
    tmp24 = tmp22 + tmp23
    tmp26 = tmp24 + tmp25
    tmp27 = tl.sigmoid(tmp26)
    tmp28 = libdevice.tanh(tmp21)
    tmp29 = tmp27 * tmp28
    tl.store(out_ptr0 + x2, tmp5, xmask)
    tl.store(out_ptr1 + x2, tmp11, xmask)
    tl.store(out_ptr2 + x2, tmp17, xmask)
    tl.store(out_ptr3 + x2, tmp21, xmask)
    tl.store(out_ptr4 + x2, tmp27, xmask)
    tl.store(out_ptr5 + x2, tmp29, xmask)


@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_3(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
    tmp6 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
    tmp7 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
    tmp12 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
    tmp13 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
    tmp18 = tl.load(in_ptr3 + x2, xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl.sigmoid(tmp4)
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 + tmp9
    tmp11 = tl.sigmoid(tmp10)
    tmp14 = tmp12 + tmp13
    tmp16 = tmp14 + tmp15
    tmp17 = libdevice.tanh(tmp16)
    tmp19 = tmp5 * tmp18
    tmp20 = tmp11 * tmp17
    tmp21 = tmp19 + tmp20
    tmp22 = libdevice.tanh(tmp21)
    tl.store(out_ptr0 + x2, tmp5, xmask)
    tl.store(out_ptr1 + x2, tmp11, xmask)
    tl.store(out_ptr2 + x2, tmp17, xmask)
    tl.store(out_ptr3 + x2, tmp22, xmask)


@triton.jit
def triton_poi_fused_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl.sigmoid(tmp4)
    tl.store(out_ptr0 + x2, tmp5, xmask)


@triton.jit
def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x0 = xindex % 4
    x2 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1)), tmp9 & xmask, other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-8 + x1)), tmp14 & xmask, other=0.0)
    tmp16 = tmp0 >= tmp12
    tl.full([1], 16, tl.int64)
    tmp19 = tl.load(in_ptr3 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
    tmp20 = tl.load(in_ptr2 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
    tmp21 = tmp19 * tmp20
    tmp22 = tl.load(in_ptr4 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
    tmp23 = tl.load(in_ptr5 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
    tmp24 = tmp22 * tmp23
    tmp25 = tmp21 + tmp24
    tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
    tmp27 = tl.where(tmp16, tmp25, tmp26)
    tmp28 = tl.where(tmp14, tmp15, tmp27)
    tmp29 = tl.where(tmp9, tmp10, tmp28)
    tmp30 = tl.where(tmp4, tmp5, tmp29)
    tl.store(out_ptr0 + x2, tmp30, xmask)


@triton.jit
def triton_poi_fused_stack_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x0 = xindex % 4
    x2 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1)), tmp9 & xmask, other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-8 + x1)), tmp14 & xmask, other=0.0)
    tmp16 = tmp0 >= tmp12
    tl.full([1], 16, tl.int64)
    tmp19 = tl.load(in_ptr3 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
    tmp20 = tl.load(in_ptr4 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
    tmp21 = tmp19 * tmp20
    tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
    tmp23 = tl.where(tmp16, tmp21, tmp22)
    tmp24 = tl.where(tmp14, tmp15, tmp23)
    tmp25 = tl.where(tmp9, tmp10, tmp24)
    tmp26 = tl.where(tmp4, tmp5, tmp25)
    tl.store(out_ptr0 + x2, tmp26, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (16,), (1,))
    assert_size_stride(primals_3, (4, 16), (16, 1))
    assert_size_stride(primals_4, (4, 16), (16, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_zero_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
        extern_kernels.mm(buf0, primals_3, out=buf1)
        buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 0),
            primals_4, out=buf2)
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf33 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1[grid(16)](
            buf1, primals_2, buf2, buf3, buf4, buf5, buf33, buf6, buf7, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf8 = buf2
        del buf2
        extern_kernels.mm(buf7, primals_3, out=buf8)
        buf9 = buf1
        del buf1
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 4),
            primals_4, out=buf9)
        buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_add_mul_sigmoid_tanh_2[grid(16)](buf8, primals_2,
            buf9, buf5, buf10, buf11, buf12, buf13, buf14, buf15, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf16 = buf9
        del buf9
        extern_kernels.mm(buf15, primals_3, out=buf16)
        buf17 = buf8
        del buf8
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 8),
            primals_4, out=buf17)
        buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_add_mul_sigmoid_tanh_2[grid(16)](buf16, primals_2,
            buf17, buf13, buf18, buf19, buf20, buf21, buf22, buf23, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf24 = buf17
        del buf17
        extern_kernels.mm(buf23, primals_3, out=buf24)
        buf25 = buf16
        del buf16
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (16, 1), 12),
            primals_4, out=buf25)
        del primals_4
        buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf27 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf28 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_add_mul_sigmoid_tanh_3[grid(16)](buf24, primals_2,
            buf25, buf21, buf26, buf27, buf28, buf30, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf29 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_sigmoid_4[grid(16)](buf24, primals_2, buf25, buf29,
            16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_2
        buf31 = reinterpret_tensor(buf25, (16, 4), (4, 1), 0)
        del buf25
        triton_poi_fused_stack_5[grid(64)](buf5, buf13, buf21, buf26, buf27,
            buf28, buf31, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf32 = reinterpret_tensor(buf24, (16, 4), (4, 1), 0)
        del buf24
        triton_poi_fused_stack_6[grid(64)](buf7, buf15, buf23, buf29, buf30,
            buf32, 64, XBLOCK=64, num_warps=1, num_stages=1)
    return (reinterpret_tensor(buf32, (4, 4, 4), (4, 16, 1), 0),
        reinterpret_tensor(buf31, (4, 4, 4), (4, 16, 1), 0), buf0, buf3,
        buf4, buf5, buf6, buf10, buf11, buf12, buf13, buf14, buf18, buf19,
        buf20, buf21, buf22, buf26, buf27, buf28, buf29, buf30,
        reinterpret_tensor(primals_1, (4, 4), (1, 16), 12),
        reinterpret_tensor(primals_3, (16, 4), (1, 16), 0),
        reinterpret_tensor(buf23, (4, 4), (1, 4), 0),
        reinterpret_tensor(primals_1, (4, 4), (1, 16), 8),
        reinterpret_tensor(buf15, (4, 4), (1, 4), 0),
        reinterpret_tensor(primals_1, (4, 4), (1, 16), 4),
        reinterpret_tensor(buf7, (4, 4), (1, 4), 0), buf33,
        reinterpret_tensor(primals_1, (4, 4), (1, 16), 0))


class LSTMNew(nn.Module):
    """Implementation of the standard LSTM.

    TODO: Include ref and LaTeX equations

    Parameters
    ----------
    input_size : int
        Number of input features
    hidden_size : int
        Number of hidden/memory cells.
    batch_first : bool, optional
        If True, expects the batch inputs to be of shape [batch, seq, features]
        otherwise, the shape has to be [seq, batch, features], by default True.
    initial_forget_bias : int, optional
        Value of the initial forget gate bias, by default 0
    """

    def __init__(self, input_size: 'int', hidden_size: 'int', batch_first:
        'bool'=True, initial_forget_bias: 'int'=0):
        super(LSTMNew, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.batch_first = batch_first
        self.initial_forget_bias = initial_forget_bias
        self.weight_ih = nn.Parameter(torch.FloatTensor(input_size, 4 *
            hidden_size))
        self.weight_hh = nn.Parameter(torch.FloatTensor(hidden_size, 4 *
            hidden_size))
        self.bias = nn.Parameter(torch.FloatTensor(4 * hidden_size))
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize all learnable parameters of the LSTM"""
        nn.init.orthogonal_(self.weight_ih.data)
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 4)
        self.weight_hh.data = weight_hh_data
        nn.init.constant_(self.bias.data, val=0)
        if self.initial_forget_bias != 0:
            self.bias.data[:self.hidden_size] = self.initial_forget_bias

    def forward(self, input_0):
        primals_3 = self.weight_ih
        primals_4 = self.weight_hh
        primals_2 = self.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0], output[1]
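# --- Added parity-check sketch (not part of the original dataset entry) ---
# A hedged way to compare the eager LSTM with the Inductor-lowered LSTMNew.
# Assumes a CUDA device, since call() launches Triton kernels, and that both
# classes above are in scope; both expose weight_ih/weight_hh/bias, so their
# state dicts are interchangeable.
import torch

ref = LSTM(input_size=4, hidden_size=4).cuda()
opt = LSTMNew(input_size=4, hidden_size=4).cuda()
opt.load_state_dict(ref.state_dict())  # make the random inits match
x = torch.rand(4, 4, 4, device='cuda')
h_ref, c_ref = ref(x)
h_opt, c_opt = opt(x)
torch.testing.assert_close(h_opt, h_ref)
torch.testing.assert_close(c_opt, c_ref)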
danielsuo/toy_flood
LSTM
false
15134
[ "MIT" ]
49
471d3c4091d86d4a00fbf910937d4e60fdaf79a1
https://github.com/danielsuo/toy_flood/tree/471d3c4091d86d4a00fbf910937d4e60fdaf79a1
GatedConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/bm/cbms7k7oqr57xzgjmhazevrue5uae6mh2w7czwc7inftkzj7zfey.py # Topologically Sorted Source Nodes: [temps], Original ATen: [aten.convolution] # Source node to ATen node mapping: # temps => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2592 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 81) % 8 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/z3/cz33ttxbb3mrc2hi5wvc77rhp7nbghz2m25q3tb6nqzbjr536aek.py # Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.glu] # Source node to ATen node mapping: # outputs => glu # Graph fragment: # %glu : [num_users=1] = call_function[target=torch.ops.aten.glu.default](args = (%convolution, 1), kwargs = {}) triton_poi_fused_glu_1 = async_compile.triton('triton_poi_fused_glu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_glu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_glu_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 324 x1 = (xindex // 324) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (648*x1)), xmask) tmp1 = tl.load(in_ptr0 + (324 + x0 + (648*x1)), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (8, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [temps], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 9, 9), (648, 81, 9, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [temps], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 2592, grid=grid(2592), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32) # Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.glu] triton_poi_fused_glu_1.run(buf1, buf2, 1296, grid=grid(1296), stream=stream0) return (buf2, primals_1, primals_3, buf1, ) def 
benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((8, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
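# --- Added usage note (not part of the original dataset entry) ---
# The generated file above bundles its own benchmark harness. Saved as, e.g.,
# gated_conv_compiled.py (a hypothetical filename), it could be timed with:
#
#     from gated_conv_compiled import benchmark_compiled_module
#     benchmark_compiled_module(times=10, repeat=10)  # prints timing via print_performance
#
# or simply executed as a script, which routes through compiled_module_main.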
import torch
import torch.nn as nn
from torch.nn import functional as F


class GatedConv2d(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride,
        padding, dilation=1):
        super(GatedConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, 2 * out_channels, kernel_size,
            stride, padding, dilation)

    def forward(self, inputs):
        temps = self.conv(inputs)
        outputs = F.glu(temps, dim=1)
        return outputs


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4,
        'stride': 1, 'padding': 4}]
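# --- Added usage sketch (not part of the original dataset entry) ---
# Output-shape walkthrough for GatedConv2d with the init config above.
import torch

layer = GatedConv2d(in_channels=4, out_channels=4, kernel_size=4, stride=1,
    padding=4)
x = torch.rand(4, 4, 4, 4)
y = layer(x)
# The conv produces 2*out_channels = 8 maps of size (4 + 2*4 - 4)/1 + 1 = 9,
# and F.glu then halves the channel dimension back to out_channels = 4.
assert y.shape == (4, 4, 9, 9)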
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 2592
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 81 % 8
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused_glu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1296
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 324
    x1 = xindex // 324
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 648 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (324 + x0 + 648 * x1), xmask)
    tmp2 = tl.sigmoid(tmp1)
    tmp3 = tmp0 * tmp2
    tl.store(out_ptr0 + x2, tmp3, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (8, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (8,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(4, 4), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 8, 9, 9), (648, 81, 9, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(2592)](buf1, primals_2, 2592,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32)
        triton_poi_fused_glu_1[grid(1296)](buf1, buf2, 1296, XBLOCK=256,
            num_warps=4, num_stages=1)
    return buf2, primals_1, primals_3, buf1


class GatedConv2dNew(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride,
        padding, dilation=1):
        super(GatedConv2dNew, self).__init__()
        self.conv = nn.Conv2d(in_channels, 2 * out_channels, kernel_size,
            stride, padding, dilation)

    def forward(self, input_0):
        primals_1 = self.conv.weight
        primals_2 = self.conv.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
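# --- Added reference sketch (not part of the original dataset entry) ---
# Plain-PyTorch equivalent of triton_poi_fused_glu_1: per image (stride 648),
# the kernel reads the first channel half at offset x0 and the second half at
# 324 + x0, then multiplies the first by the sigmoid of the second, i.e. GLU.
# glu_reference is an illustrative name, not part of the generated module.
import torch


def glu_reference(t: torch.Tensor) -> torch.Tensor:
    a, b = t.chunk(2, dim=1)  # value path, gate path
    return a * torch.sigmoid(b)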
davidreiman/nsf
GatedConv2d
false
15136
[ "MIT" ]
231
ed70316c3bf1acd4ffdf309f1773172c34e48320
https://github.com/davidreiman/nsf/tree/ed70316c3bf1acd4ffdf309f1773172c34e48320
decoder5
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/5d/c5dw65h6nafj4s44sgiahjrq6lb3zgwonovnkpx75jkkuxpl34xg.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # out => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 73728 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 6 x1 = (xindex // 6) % 6 x2 = (xindex // 36) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), None, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/kk/ckkeehad7xvjmxduxmwzjdm4zu3f5inttmd6kj4pju55xhwrnoea.py # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] # Source node to ATen node mapping: # out_3 => add, add_1, convert_element_type, convert_element_type_1, iota_2, mul, mul_1 # Graph fragment: # %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_2, 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.5), kwargs = {}) # %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_1, torch.int64), kwargs = {}) triton_poi_fused__to_copy_add_arange_mul_1 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_0/inductor_cache/js/cjsibzhf6ubyye7gfes254fjkkbrqi7tzoox57ltdxf6tnfl6hr4.py # Topologically Sorted Source Nodes: [out_1, out_2, out_3, out_4], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d] # Source node to ATen node mapping: # out_1 => convolution # out_2 => relu # out_3 => _unsafe_index_2 # out_4 => _unsafe_index_3, _unsafe_index_4 # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) # %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, %sub_5, None]), kwargs = {}) # %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_3, [None, None, None, %sub_5]), kwargs = {}) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 204800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 10) % 10 x0 = xindex % 10 x4 = (xindex // 100) x2 = (xindex // 100) % 512 x7 = xindex tmp0 = tl.load(in_ptr0 + (7 + ((-1)*(tl_math.abs((-7) + (tl_math.abs((-1) + x1)))))), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (7 + ((-1)*(tl_math.abs((-7) + (tl_math.abs((-1) + x0)))))), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x2), None, eviction_policy='evict_last') tmp1 = 
tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + (4*tmp4) + (16*x4)), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x7), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/e2/ce2k4lym2nqhf3w5dsvz6zgxc3jsam7d572srar4v3rqbuyr34g6.py # Topologically Sorted Source Nodes: [out_5, out_6, out_7], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] # Source node to ATen node mapping: # out_5 => convolution_1 # out_6 => relu_1 # out_7 => _unsafe_index_5, _unsafe_index_6 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_4, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) # %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_1, [None, None, %sub_5, None]), kwargs = {}) # %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_5, [None, None, None, %sub_5]), kwargs = {}) triton_poi_fused_convolution_reflection_pad2d_relu_3 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 204800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 10 x1 = (xindex // 10) % 10 x4 = (xindex // 100) x2 = (xindex // 100) % 512 x5 = xindex tmp0 = tl.load(in_ptr0 + (63 + ((-1)*(tl_math.abs((-7) + (tl_math.abs((-1) + x0))))) + ((-8)*(tl_math.abs((-7) + (tl_math.abs((-1) + x1))))) + (64*x4)), None, eviction_policy='evict_last') tmp1 = 
tl.load(in_ptr1 + (x2), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x5), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/7n/c7nyyvchtptp5rbqvks7cavg63gajcswx5tpkvypdmc7ocd2zaaz.py # Topologically Sorted Source Nodes: [out_16], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] # Source node to ATen node mapping: # out_16 => add_4, add_5, convert_element_type_4, convert_element_type_5, iota_12, mul_4, mul_5 # Graph fragment: # %iota_12 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_12, 1), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 0), kwargs = {}) # %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_4, torch.float32), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, 0.5), kwargs = {}) # %convert_element_type_5 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_5, torch.int64), kwargs = {}) triton_poi_fused__to_copy_add_arange_mul_4 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bs/cbspecvkmrhwtp6geje57vqvyhtdbzqxeh4bkirrauq3bdgepjtc.py # Topologically Sorted Source Nodes: 
[out_14, out_15, out_16, out_17], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d] # Source node to ATen node mapping: # out_14 => convolution_4 # out_15 => relu_4 # out_16 => _unsafe_index_11 # out_17 => _unsafe_index_12, _unsafe_index_13 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_10, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) # %_unsafe_index_11 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_4, [None, None, %unsqueeze_1, %convert_element_type_5]), kwargs = {}) # %_unsafe_index_12 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_11, [None, None, %sub_21, None]), kwargs = {}) # %_unsafe_index_13 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_12, [None, None, None, %sub_21]), kwargs = {}) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 331776 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 18) % 18 x0 = xindex % 18 x4 = (xindex // 324) x2 = (xindex // 324) % 256 x7 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-1) + x1)))))), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-1) + x0)))))), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x2), None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 8, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) 
tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + (8*tmp4) + (64*x4)), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x7), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/6j/c6jeyn2lxggjjpib4soswcyajwy6iz7gvwfskycudxoh6ks5elxb.py # Topologically Sorted Source Nodes: [out_18, out_19, out_20], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] # Source node to ATen node mapping: # out_18 => convolution_5 # out_19 => relu_5 # out_20 => _unsafe_index_14, _unsafe_index_15 # Graph fragment: # %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_13, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {}) # %_unsafe_index_14 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_5, [None, None, %sub_21, None]), kwargs = {}) # %_unsafe_index_15 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_14, [None, None, None, %sub_21]), kwargs = {}) triton_poi_fused_convolution_reflection_pad2d_relu_6 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 331776 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 18 x1 = (xindex // 18) % 18 x4 = (xindex // 324) x2 = (xindex // 324) % 256 x5 = xindex tmp0 = tl.load(in_ptr0 + (255 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-1) + x0))))) + ((-16)*(tl_math.abs((-15) + (tl_math.abs((-1) + x1))))) + (256*x4)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 
tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x5), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/uw/cuwhadke7ul24n6hzb5sal45fu4ro7spo5s7tl5x4bo5jl7gz66f.py # Topologically Sorted Source Nodes: [out_29], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] # Source node to ATen node mapping: # out_29 => add_8, add_9, convert_element_type_8, convert_element_type_9, iota_22, mul_8, mul_9 # Graph fragment: # %iota_22 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (32,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_22, 1), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_8, 0), kwargs = {}) # %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_8, torch.float32), kwargs = {}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_8, 0.0), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_9, 0.5), kwargs = {}) # %convert_element_type_9 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_9, torch.int64), kwargs = {}) triton_poi_fused__to_copy_add_arange_mul_7 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_7(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/h3/ch3jm3bt3vnato3q323q7wlnky2jb7vevymckrrxpv7cesdxjfra.py # Topologically Sorted Source Nodes: [out_27, out_28, out_29, out_30], Original ATen: [aten.convolution, aten.relu, 
aten._unsafe_index, aten.reflection_pad2d] # Source node to ATen node mapping: # out_27 => convolution_8 # out_28 => relu_8 # out_29 => _unsafe_index_20 # out_30 => _unsafe_index_21, _unsafe_index_22 # Graph fragment: # %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_19, %primals_18, %primals_19, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_8 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_8,), kwargs = {}) # %_unsafe_index_20 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_8, [None, None, %unsqueeze_2, %convert_element_type_9]), kwargs = {}) # %_unsafe_index_21 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_20, [None, None, %sub_37, None]), kwargs = {}) # %_unsafe_index_22 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_21, [None, None, None, %sub_37]), kwargs = {}) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 591872 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 34) % 34 x0 = xindex % 34 x4 = (xindex // 1156) x2 = (xindex // 1156) % 128 x7 = xindex tmp0 = tl.load(in_ptr0 + (31 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1)))))), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (31 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x0)))))), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x2), None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 16, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 
= tl.load(in_ptr1 + (tmp8 + (16*tmp4) + (256*x4)), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x7), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/rh/crhey53viabe4hsp6ad6i2nee7q7zyv4njq6zse35tn7cpygnnbv.py # Topologically Sorted Source Nodes: [out_31, out_32, out_33], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] # Source node to ATen node mapping: # out_31 => convolution_9 # out_32 => relu_9 # out_33 => _unsafe_index_23, _unsafe_index_24 # Graph fragment: # %convolution_9 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_22, %primals_20, %primals_21, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_9 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_9,), kwargs = {}) # %_unsafe_index_23 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_9, [None, None, %sub_37, None]), kwargs = {}) # %_unsafe_index_24 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_23, [None, None, None, %sub_37]), kwargs = {}) triton_poi_fused_convolution_reflection_pad2d_relu_9 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_relu_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_relu_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 591872 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 34 x1 = (xindex // 34) % 34 x4 = (xindex // 1156) x2 = (xindex // 1156) % 128 x5 = xindex tmp0 = tl.load(in_ptr0 + (1023 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x0))))) + ((-32)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1))))) + (1024*x4)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, 
tmp2) tl.store(out_ptr0 + (x5), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bu/cbuvyw4fe3xbsdcjmotlovg3kxgvi3ofzc3qlcpjtkkuyytb22ws.py # Topologically Sorted Source Nodes: [out_36], Original ATen: [aten.arange] # Source node to ATen node mapping: # out_36 => iota_28 # Graph fragment: # %iota_28 : [num_users=2] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) triton_poi_fused_arange_10 = async_compile.triton('triton_poi_fused_arange_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_arange_10(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/fn/cfndjiv3tblgvnqtytlbgh4w5inf7gba2up5wfjsuaevqfszyxak.py # Topologically Sorted Source Nodes: [out_36], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] # Source node to ATen node mapping: # out_36 => add_12, add_13, convert_element_type_12, convert_element_type_13, mul_12, mul_13 # Graph fragment: # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_28, 1), kwargs = {}) # %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_12, 0), kwargs = {}) # %convert_element_type_12 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_12, torch.float32), kwargs = {}) # %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_12, 0.0), kwargs = {}) # %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_13, 0.5), kwargs = {}) # %convert_element_type_13 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_13, torch.int64), kwargs = {}) triton_poi_fused__to_copy_add_arange_mul_11 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_11', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_11(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/rm/crmkhdiqj6544wyexgor4rnkds7gglwjet2cygw3o64zawf6zh7h.py # Topologically Sorted Source Nodes: [out_34, out_35, out_36, out_37], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d] # Source node to ATen node mapping: # out_34 => convolution_10 # out_35 => relu_10 # out_36 => _unsafe_index_25 # out_37 => _unsafe_index_26, _unsafe_index_27 # Graph fragment: # %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_24, %primals_22, %primals_23, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_10 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_10,), kwargs = {}) # %_unsafe_index_25 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_10, [None, None, %unsqueeze_3, %convert_element_type_13]), kwargs = {}) # %_unsafe_index_26 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_25, [None, None, %sub_45, None]), kwargs = {}) # %_unsafe_index_27 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_26, [None, None, None, %sub_45]), kwargs = {}) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_12 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( 
size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1115136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 66) % 66 x0 = xindex % 66 x4 = (xindex // 4356) x2 = (xindex // 4356) % 64 x7 = xindex tmp0 = tl.load(in_ptr0 + (63 + ((-1)*(tl_math.abs((-63) + (tl_math.abs((-1) + x1)))))), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (63 + ((-1)*(tl_math.abs((-63) + (tl_math.abs((-1) + x0)))))), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 32, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + (32*tmp4) + (1024*x4)), xmask, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x7), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/3t/c3tabqt44sb4glrpzzppdivi6ri4ru3tms7yd2a263tjtky77sll.py # Topologically Sorted Source Nodes: [out_38, out_39, out_40], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] # Source node to ATen node mapping: # out_38 => convolution_11 # out_39 => relu_11 # out_40 => _unsafe_index_28, _unsafe_index_29 # Graph fragment: # %convolution_11 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_27, %primals_24, %primals_25, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_11 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_11,), kwargs = {}) # %_unsafe_index_28 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_11, [None, None, %sub_45, None]), kwargs = {}) # %_unsafe_index_29 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_28, [None, None, None, %sub_45]), kwargs = {}) triton_poi_fused_convolution_reflection_pad2d_relu_13 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_relu_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_relu_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_13(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1115136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 66 x1 = (xindex // 66) % 66 x4 = (xindex // 4356) x2 = (xindex // 4356) % 64 x5 = xindex tmp0 = tl.load(in_ptr0 + (4095 + ((-1)*(tl_math.abs((-63) + (tl_math.abs((-1) + x0))))) + ((-64)*(tl_math.abs((-63) + (tl_math.abs((-1) + x1))))) + (4096*x4)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x5), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ln/cln46y6bcljj5xvgyl7h2nw5krdqozyeit2hebhehl5zbzlutyda.py # Topologically Sorted Source Nodes: [out_41], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out_41 => convolution_12 # Graph fragment: # %convolution_12 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_29, %primals_26, %primals_27, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_14 = async_compile.triton('triton_poi_fused_convolution_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 49152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/cj/ccjccu7zc7ogdpljitls2hefhpl6qq2kpfnn26koefpxdfdv7teq.py # Topologically Sorted Source Nodes: [out_38, out_39], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_38 => convolution_11 # out_39 => relu_11 # Graph fragment: # %convolution_11 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_27, %primals_24, %primals_25, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_11 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_11,), kwargs = {}) # %le_18 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_11, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_15 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_15(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) 
% 64 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/at/catvmzd72emtja24agoetxsfbfoouankui4el2vao7ua26an3h5e.py # Topologically Sorted Source Nodes: [out_34, out_35], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_34 => convolution_10 # out_35 => relu_10 # Graph fragment: # %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_24, %primals_22, %primals_23, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_10 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_10,), kwargs = {}) # %le_37 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_10, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_16 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_16', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 64 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/eu/ceu4vjwnu7t72daazfpt2wr6v3l3op2tzwyfbbm2wcynm5vjzmkk.py # Topologically Sorted Source Nodes: [out_31, out_32], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_31 => convolution_9 # out_32 => relu_9 # Graph fragment: # 
%convolution_9 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_22, %primals_20, %primals_21, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_9 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_9,), kwargs = {}) # %le_56 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_9, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_17 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_17', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_17(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 128 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/g7/cg7u5oo3boouswvtbbyzyk3b6wbhd74gf7zcz66gyyh2akxdphhi.py # Topologically Sorted Source Nodes: [out_27, out_28], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_27 => convolution_8 # out_28 => relu_8 # Graph fragment: # %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_19, %primals_18, %primals_19, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_8 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_8,), kwargs = {}) # %le_75 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_8, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_18 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_18', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_18(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 128 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/cl/ccl6rdg5ilfmge5dt7ngbjb3ox5h4bpvbi7ffnd7mckl4lruweqh.py # Topologically Sorted Source Nodes: [out_24, out_25], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_24 => convolution_7 # out_25 => relu_7 # Graph fragment: # %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_17, %primals_16, %primals_17, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {}) # %le_94 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_7, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_19 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_19', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': 
{}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_19(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 256 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/m3/cm3chxnerwp2olw2trs5cfxyfg5jjej3imjwwln6dvqacawhinod.py # Topologically Sorted Source Nodes: [out_14, out_15], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_14 => convolution_4 # out_15 => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_10, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) # %le_151 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_20 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_20', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_20(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 256 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ts/ctswyyscojlcdj3bvsxhx5ujnib7w6opid7m4lucqvluzcl3n5pv.py # Topologically Sorted Source Nodes: [out_11, out_12], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_11 => convolution_3 # out_12 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_8, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) # %le_170 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_21 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_21', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_21(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 512 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) 
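    # (added note, not part of the generated kernel) tmp4 = relu(conv_out + bias);
    # the next two statements build the boolean mask (relu_out <= 0) that
    # aten.threshold_backward later uses to zero gradients through this ReLU.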
tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/bb/cbbl3sfaozoe4jbyrqweizaizeix4urgms7ffifscr2veiod4gcu.py # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_1 => convolution # out_2 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le_227 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_22 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_22', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_22(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 512 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27 = args args.clear() assert_size_stride(primals_1, (4, 512, 4, 4), (8192, 16, 4, 1)) assert_size_stride(primals_2, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_3, (512, 
), (1, )) assert_size_stride(primals_4, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_5, (512, ), (1, )) assert_size_stride(primals_6, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_7, (512, ), (1, )) assert_size_stride(primals_8, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_9, (512, ), (1, )) assert_size_stride(primals_10, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_11, (256, ), (1, )) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256, ), (1, )) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256, ), (1, )) assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (256, ), (1, )) assert_size_stride(primals_18, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_19, (128, ), (1, )) assert_size_stride(primals_20, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_21, (128, ), (1, )) assert_size_stride(primals_22, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_23, (64, ), (1, )) assert_size_stride(primals_24, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_25, (64, ), (1, )) assert_size_stride(primals_26, (3, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_27, (3, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 6, 6), (18432, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 73728, grid=grid(73728), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 512, 4, 4), (8192, 16, 4, 1)) buf2 = empty_strided_cuda((8, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] triton_poi_fused__to_copy_add_arange_mul_1.run(buf2, 8, grid=grid(8), stream=stream0) buf3 = empty_strided_cuda((4, 512, 10, 10), (51200, 100, 10, 1), torch.float32) # Topologically Sorted Source Nodes: [out_1, out_2, out_3, out_4], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d] triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2.run(buf2, buf1, primals_3, buf3, 204800, grid=grid(204800), stream=stream0) # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 512, 8, 8), (32768, 64, 8, 1)) buf5 = empty_strided_cuda((4, 512, 10, 10), (51200, 100, 10, 1), torch.float32) # Topologically Sorted Source Nodes: [out_5, out_6, out_7], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] triton_poi_fused_convolution_reflection_pad2d_relu_3.run(buf4, primals_5, buf5, 204800, grid=grid(204800), stream=stream0) # Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 512, 8, 8), (32768, 
64, 8, 1)) buf7 = empty_strided_cuda((4, 512, 10, 10), (51200, 100, 10, 1), torch.float32) # Topologically Sorted Source Nodes: [out_8, out_9, out_10], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] triton_poi_fused_convolution_reflection_pad2d_relu_3.run(buf6, primals_7, buf7, 204800, grid=grid(204800), stream=stream0) # Topologically Sorted Source Nodes: [out_11], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 512, 8, 8), (32768, 64, 8, 1)) buf9 = empty_strided_cuda((4, 512, 10, 10), (51200, 100, 10, 1), torch.float32) # Topologically Sorted Source Nodes: [out_11, out_12, out_13], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] triton_poi_fused_convolution_reflection_pad2d_relu_3.run(buf8, primals_9, buf9, 204800, grid=grid(204800), stream=stream0) # Topologically Sorted Source Nodes: [out_14], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 256, 8, 8), (16384, 64, 8, 1)) buf11 = empty_strided_cuda((16, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [out_16], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] triton_poi_fused__to_copy_add_arange_mul_4.run(buf11, 16, grid=grid(16), stream=stream0) buf12 = empty_strided_cuda((4, 256, 18, 18), (82944, 324, 18, 1), torch.float32) # Topologically Sorted Source Nodes: [out_14, out_15, out_16, out_17], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d] triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5.run(buf11, buf10, primals_11, buf12, 331776, grid=grid(331776), stream=stream0) # Topologically Sorted Source Nodes: [out_18], Original ATen: [aten.convolution] buf13 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 256, 16, 16), (65536, 256, 16, 1)) buf14 = empty_strided_cuda((4, 256, 18, 18), (82944, 324, 18, 1), torch.float32) # Topologically Sorted Source Nodes: [out_18, out_19, out_20], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] triton_poi_fused_convolution_reflection_pad2d_relu_6.run(buf13, primals_13, buf14, 331776, grid=grid(331776), stream=stream0) # Topologically Sorted Source Nodes: [out_21], Original ATen: [aten.convolution] buf15 = extern_kernels.convolution(buf14, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 256, 16, 16), (65536, 256, 16, 1)) buf16 = empty_strided_cuda((4, 256, 18, 18), (82944, 324, 18, 1), torch.float32) # Topologically Sorted Source Nodes: [out_21, out_22, out_23], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] triton_poi_fused_convolution_reflection_pad2d_relu_6.run(buf15, primals_15, buf16, 331776, grid=grid(331776), stream=stream0) # Topologically Sorted Source Nodes: [out_24], Original ATen: [aten.convolution] buf17 = extern_kernels.convolution(buf16, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 256, 16, 16), (65536, 256, 16, 1)) buf18 = 
empty_strided_cuda((4, 256, 18, 18), (82944, 324, 18, 1), torch.float32) # Topologically Sorted Source Nodes: [out_24, out_25, out_26], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] triton_poi_fused_convolution_reflection_pad2d_relu_6.run(buf17, primals_17, buf18, 331776, grid=grid(331776), stream=stream0) # Topologically Sorted Source Nodes: [out_27], Original ATen: [aten.convolution] buf19 = extern_kernels.convolution(buf18, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 16, 16), (32768, 256, 16, 1)) buf20 = empty_strided_cuda((32, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [out_29], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] triton_poi_fused__to_copy_add_arange_mul_7.run(buf20, 32, grid=grid(32), stream=stream0) buf21 = empty_strided_cuda((4, 128, 34, 34), (147968, 1156, 34, 1), torch.float32) # Topologically Sorted Source Nodes: [out_27, out_28, out_29, out_30], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d] triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8.run(buf20, buf19, primals_19, buf21, 591872, grid=grid(591872), stream=stream0) # Topologically Sorted Source Nodes: [out_31], Original ATen: [aten.convolution] buf22 = extern_kernels.convolution(buf21, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf23 = empty_strided_cuda((4, 128, 34, 34), (147968, 1156, 34, 1), torch.float32) # Topologically Sorted Source Nodes: [out_31, out_32, out_33], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] triton_poi_fused_convolution_reflection_pad2d_relu_9.run(buf22, primals_21, buf23, 591872, grid=grid(591872), stream=stream0) # Topologically Sorted Source Nodes: [out_34], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf23, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf25 = empty_strided_cuda((64, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [out_36], Original ATen: [aten.arange] triton_poi_fused_arange_10.run(buf25, 64, grid=grid(64), stream=stream0) buf26 = empty_strided_cuda((64, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [out_36], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] triton_poi_fused__to_copy_add_arange_mul_11.run(buf26, 64, grid=grid(64), stream=stream0) buf27 = empty_strided_cuda((4, 64, 66, 66), (278784, 4356, 66, 1), torch.float32) # Topologically Sorted Source Nodes: [out_34, out_35, out_36, out_37], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index, aten.reflection_pad2d] triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_12.run(buf26, buf24, primals_23, buf27, 1115136, grid=grid(1115136), stream=stream0) # Topologically Sorted Source Nodes: [out_38], Original ATen: [aten.convolution] buf28 = extern_kernels.convolution(buf27, primals_24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf29 = empty_strided_cuda((4, 64, 66, 66), (278784, 4356, 66, 1), torch.float32) # Topologically Sorted Source Nodes: [out_38, out_39, out_40], 
Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d] triton_poi_fused_convolution_reflection_pad2d_relu_13.run(buf28, primals_25, buf29, 1115136, grid=grid(1115136), stream=stream0) # Topologically Sorted Source Nodes: [out_41], Original ATen: [aten.convolution] buf30 = extern_kernels.convolution(buf29, primals_26, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf31 = buf30; del buf30 # reuse # Topologically Sorted Source Nodes: [out_41], Original ATen: [aten.convolution] triton_poi_fused_convolution_14.run(buf31, primals_27, 49152, grid=grid(49152), stream=stream0) del primals_27 buf32 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [out_38, out_39], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_15.run(buf28, primals_25, buf32, 1048576, grid=grid(1048576), stream=stream0) del buf28 del primals_25 buf33 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) # Topologically Sorted Source Nodes: [out_34, out_35], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_16.run(buf24, primals_23, buf33, 262144, grid=grid(262144), stream=stream0) del buf24 del primals_23 buf34 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.bool) # Topologically Sorted Source Nodes: [out_31, out_32], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_17.run(buf22, primals_21, buf34, 524288, grid=grid(524288), stream=stream0) del buf22 del primals_21 buf35 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) # Topologically Sorted Source Nodes: [out_27, out_28], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_18.run(buf19, primals_19, buf35, 131072, grid=grid(131072), stream=stream0) del buf19 del primals_19 buf36 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.bool) # Topologically Sorted Source Nodes: [out_24, out_25], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_19.run(buf17, primals_17, buf36, 262144, grid=grid(262144), stream=stream0) del buf17 del primals_17 buf37 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.bool) # Topologically Sorted Source Nodes: [out_21, out_22], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_19.run(buf15, primals_15, buf37, 262144, grid=grid(262144), stream=stream0) del buf15 del primals_15 buf38 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.bool) # Topologically Sorted Source Nodes: [out_18, out_19], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_19.run(buf13, primals_13, buf38, 262144, grid=grid(262144), stream=stream0) del buf13 del primals_13 buf39 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool) # Topologically Sorted Source Nodes: [out_14, out_15], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_20.run(buf10, primals_11, buf39, 65536, grid=grid(65536), 
stream=stream0) del buf10 del primals_11 buf40 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.bool) # Topologically Sorted Source Nodes: [out_11, out_12], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_21.run(buf8, primals_9, buf40, 131072, grid=grid(131072), stream=stream0) del buf8 del primals_9 buf41 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.bool) # Topologically Sorted Source Nodes: [out_8, out_9], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_21.run(buf6, primals_7, buf41, 131072, grid=grid(131072), stream=stream0) del buf6 del primals_7 buf42 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.bool) # Topologically Sorted Source Nodes: [out_5, out_6], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_21.run(buf4, primals_5, buf42, 131072, grid=grid(131072), stream=stream0) del buf4 del primals_5 buf43 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_22.run(buf1, primals_3, buf43, 32768, grid=grid(32768), stream=stream0) del buf1 del primals_3 return (buf31, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, buf0, buf2, buf3, buf5, buf7, buf9, buf11, buf12, buf14, buf16, buf18, buf20, buf21, buf23, buf25, buf26, buf27, buf29, buf32, buf33, buf34, buf35, buf36, buf37, buf38, buf39, buf40, buf41, buf42, buf43, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 512, 4, 4), (8192, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', 
dtype=torch.float32) primals_19 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((3, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
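# ---------------------------------------------------------------------------
# Added sketch (not part of the generated Inductor output). The fused
# "_unsafe_index ... reflection_pad2d" kernels above replace
# nn.UpsamplingNearest2d + nn.ReflectionPad2d with pure index arithmetic:
# nearest-neighbor upsampling picks src = int(dst * 0.5), and 1-pixel
# reflection padding maps a padded coordinate i to
# (size-1) - | |i-1| - (size-1) |. The helper names below (nearest_src_index,
# reflect_index) are hypothetical; the formulas are read off the kernels.
# A minimal eager-mode check under those assumptions:

import torch

def nearest_src_index(dst: torch.Tensor) -> torch.Tensor:
    # Same computation as triton_poi_fused__to_copy_add_arange_mul_*:
    # cast to float, multiply by 0.5, truncate back to integer.
    return (dst.to(torch.float32) * 0.5).to(torch.int64)

def reflect_index(i: torch.Tensor, size: int) -> torch.Tensor:
    # Same computation as the (size-1) + (-1)*|(-(size-1)) + |(-1) + i||
    # pattern in the reflection_pad2d kernels (pad = 1 on each side).
    return (size - 1) - ((i - 1).abs() - (size - 1)).abs()

x = torch.randn(1, 1, 4, 4)
ref = torch.nn.ReflectionPad2d(1)(torch.nn.UpsamplingNearest2d(scale_factor=2)(x))
idx = nearest_src_index(reflect_index(torch.arange(10), 8))  # padded 10 -> upsampled 8 -> source 4
manual = x[:, :, idx][:, :, :, idx]
assert torch.equal(ref, manual)
# ---------------------------------------------------------------------------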
import torch
import torch.nn as nn


class decoder5(nn.Module):

    def __init__(self):
        super(decoder5, self).__init__()
        self.reflecPad15 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv15 = nn.Conv2d(512, 512, 3, 1, 0)
        self.relu15 = nn.ReLU(inplace=True)
        self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
        self.reflecPad16 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv16 = nn.Conv2d(512, 512, 3, 1, 0)
        self.relu16 = nn.ReLU(inplace=True)
        self.reflecPad17 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv17 = nn.Conv2d(512, 512, 3, 1, 0)
        self.relu17 = nn.ReLU(inplace=True)
        self.reflecPad18 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv18 = nn.Conv2d(512, 512, 3, 1, 0)
        self.relu18 = nn.ReLU(inplace=True)
        self.reflecPad19 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv19 = nn.Conv2d(512, 256, 3, 1, 0)
        self.relu19 = nn.ReLU(inplace=True)
        self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
        self.reflecPad20 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv20 = nn.Conv2d(256, 256, 3, 1, 0)
        self.relu20 = nn.ReLU(inplace=True)
        self.reflecPad21 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv21 = nn.Conv2d(256, 256, 3, 1, 0)
        self.relu21 = nn.ReLU(inplace=True)
        self.reflecPad22 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv22 = nn.Conv2d(256, 256, 3, 1, 0)
        self.relu22 = nn.ReLU(inplace=True)
        self.reflecPad23 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv23 = nn.Conv2d(256, 128, 3, 1, 0)
        self.relu23 = nn.ReLU(inplace=True)
        self.unpool3 = nn.UpsamplingNearest2d(scale_factor=2)
        self.reflecPad24 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv24 = nn.Conv2d(128, 128, 3, 1, 0)
        self.relu24 = nn.ReLU(inplace=True)
        self.reflecPad25 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv25 = nn.Conv2d(128, 64, 3, 1, 0)
        self.relu25 = nn.ReLU(inplace=True)
        self.unpool4 = nn.UpsamplingNearest2d(scale_factor=2)
        self.reflecPad26 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv26 = nn.Conv2d(64, 64, 3, 1, 0)
        self.relu26 = nn.ReLU(inplace=True)
        self.reflecPad27 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv27 = nn.Conv2d(64, 3, 3, 1, 0)

    def forward(self, x):
        out = self.reflecPad15(x)
        out = self.conv15(out)
        out = self.relu15(out)
        out = self.unpool(out)
        out = self.reflecPad16(out)
        out = self.conv16(out)
        out = self.relu16(out)
        out = self.reflecPad17(out)
        out = self.conv17(out)
        out = self.relu17(out)
        out = self.reflecPad18(out)
        out = self.conv18(out)
        out = self.relu18(out)
        out = self.reflecPad19(out)
        out = self.conv19(out)
        out = self.relu19(out)
        out = self.unpool2(out)
        out = self.reflecPad20(out)
        out = self.conv20(out)
        out = self.relu20(out)
        out = self.reflecPad21(out)
        out = self.conv21(out)
        out = self.relu21(out)
        out = self.reflecPad22(out)
        out = self.conv22(out)
        out = self.relu22(out)
        out = self.reflecPad23(out)
        out = self.conv23(out)
        out = self.relu23(out)
        out = self.unpool3(out)
        out = self.reflecPad24(out)
        out = self.conv24(out)
        out = self.relu24(out)
        out = self.reflecPad25(out)
        out = self.conv25(out)
        out = self.relu25(out)
        out = self.unpool4(out)
        out = self.reflecPad26(out)
        out = self.conv26(out)
        out = self.relu26(out)
        out = self.reflecPad27(out)
        out = self.conv27(out)
        return out


def get_inputs():
    return [torch.rand([4, 512, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), None, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, None) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 10 % 10 x0 = xindex % 10 x4 = xindex // 100 x2 = xindex // 100 % 512 x7 = xindex tmp0 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x1 ))), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0 ))), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 4 * tmp4 + 16 * x4), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x7, tmp13, None) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 10 x1 = xindex // 10 % 10 x4 = xindex // 100 x2 = xindex // 100 % 512 x5 = xindex tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0)) + -8 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 64 * x4), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x5, tmp4, None) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5(in_ptr0, 
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 18 % 18 x0 = xindex % 18 x4 = xindex // 324 x2 = xindex // 324 % 256 x7 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x1))), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x0))), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 8, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 8 * tmp4 + 64 * x4), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x7, tmp13, None) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 18 x1 = xindex // 18 % 18 x4 = xindex // 324 x2 = xindex // 324 % 256 x5 = xindex tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x4), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x5, tmp4, None) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_7(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 34 % 34 x0 = xindex % 34 x4 = xindex // 1156 x2 = xindex // 1156 % 128 x7 = xindex tmp0 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x1))), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x0))), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 16, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 16 * tmp4 + 256 * x4), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x7, tmp13, None) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 34 x1 = xindex // 34 % 34 x4 = xindex // 1156 x2 = xindex // 1156 % 128 x5 = xindex tmp0 = tl.load(in_ptr0 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 + x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 
1024 * x4), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x5, tmp4, None) @triton.jit def triton_poi_fused_arange_10(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_11(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_12(in_ptr0 , in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1115136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 66 % 66 x0 = xindex % 66 x4 = xindex // 4356 x2 = xindex // 4356 % 64 x7 = xindex tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x1))), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x0))), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 32, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 32 * tmp4 + 1024 * x4), xmask, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x7, tmp13, xmask) @triton.jit def triton_poi_fused_convolution_reflection_pad2d_relu_13(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1115136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 66 x1 = xindex // 66 % 66 x4 = xindex // 4356 x2 = xindex // 4356 % 64 x5 = xindex tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-1 + x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-1 + x1)) + 4096 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x5, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_15(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 
= tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_16(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_17(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_18(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_19(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_20(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_21(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 512 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_22(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 
= xindex // 16 % 512 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27) = args args.clear() assert_size_stride(primals_1, (4, 512, 4, 4), (8192, 16, 4, 1)) assert_size_stride(primals_2, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_3, (512,), (1,)) assert_size_stride(primals_4, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_5, (512,), (1,)) assert_size_stride(primals_6, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_7, (512,), (1,)) assert_size_stride(primals_8, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (256,), (1,)) assert_size_stride(primals_18, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_19, (128,), (1,)) assert_size_stride(primals_20, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_21, (128,), (1,)) assert_size_stride(primals_22, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_23, (64,), (1,)) assert_size_stride(primals_24, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_25, (64,), (1,)) assert_size_stride(primals_26, (3, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_27, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 6, 6), (18432, 36, 6, 1), torch. 
float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(73728)](primals_1, buf0, 73728, XBLOCK=1024, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 512, 4, 4), (8192, 16, 4, 1)) buf2 = empty_strided_cuda((8,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_1[grid(8)](buf2, 8, XBLOCK =8, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 512, 10, 10), (51200, 100, 10, 1), torch.float32) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2[grid (204800)](buf2, buf1, primals_3, buf3, 204800, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 512, 8, 8), (32768, 64, 8, 1)) buf5 = empty_strided_cuda((4, 512, 10, 10), (51200, 100, 10, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(204800)](buf4 , primals_5, buf5, 204800, XBLOCK=512, num_warps=8, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 512, 8, 8), (32768, 64, 8, 1)) buf7 = empty_strided_cuda((4, 512, 10, 10), (51200, 100, 10, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(204800)](buf6 , primals_7, buf7, 204800, XBLOCK=512, num_warps=8, num_stages=1) buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 512, 8, 8), (32768, 64, 8, 1)) buf9 = empty_strided_cuda((4, 512, 10, 10), (51200, 100, 10, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(204800)](buf8 , primals_9, buf9, 204800, XBLOCK=512, num_warps=8, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 256, 8, 8), (16384, 64, 8, 1)) buf11 = empty_strided_cuda((16,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_4[grid(16)](buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((4, 256, 18, 18), (82944, 324, 18, 1), torch.float32) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5[grid (331776)](buf11, buf10, primals_11, buf12, 331776, XBLOCK=1024, num_warps=4, num_stages=1) buf13 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 256, 16, 16), (65536, 256, 16, 1)) buf14 = empty_strided_cuda((4, 256, 18, 18), (82944, 324, 18, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_6[grid(331776)]( buf13, primals_13, buf14, 331776, XBLOCK=1024, num_warps=4, num_stages=1) buf15 = extern_kernels.convolution(buf14, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 256, 16, 16), (65536, 256, 16, 1)) buf16 = empty_strided_cuda((4, 256, 18, 18), (82944, 324, 18, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_6[grid(331776)]( buf15, primals_15, 
buf16, 331776, XBLOCK=1024, num_warps=4, num_stages=1) buf17 = extern_kernels.convolution(buf16, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 256, 16, 16), (65536, 256, 16, 1)) buf18 = empty_strided_cuda((4, 256, 18, 18), (82944, 324, 18, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_6[grid(331776)]( buf17, primals_17, buf18, 331776, XBLOCK=1024, num_warps=4, num_stages=1) buf19 = extern_kernels.convolution(buf18, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 16, 16), (32768, 256, 16, 1)) buf20 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_7[grid(32)](buf20, 32, XBLOCK=32, num_warps=1, num_stages=1) buf21 = empty_strided_cuda((4, 128, 34, 34), (147968, 1156, 34, 1), torch.float32) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_8[grid (591872)](buf20, buf19, primals_19, buf21, 591872, XBLOCK=1024, num_warps=4, num_stages=1) buf22 = extern_kernels.convolution(buf21, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf23 = empty_strided_cuda((4, 128, 34, 34), (147968, 1156, 34, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_9[grid(591872)]( buf22, primals_21, buf23, 591872, XBLOCK=512, num_warps=8, num_stages=1) buf24 = extern_kernels.convolution(buf23, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf25 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_arange_10[grid(64)](buf25, 64, XBLOCK=64, num_warps=1, num_stages=1) buf26 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_11[grid(64)](buf26, 64, XBLOCK=64, num_warps=1, num_stages=1) buf27 = empty_strided_cuda((4, 64, 66, 66), (278784, 4356, 66, 1), torch.float32) triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_12[ grid(1115136)](buf26, buf24, primals_23, buf27, 1115136, XBLOCK =1024, num_warps=4, num_stages=1) buf28 = extern_kernels.convolution(buf27, primals_24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf29 = empty_strided_cuda((4, 64, 66, 66), (278784, 4356, 66, 1), torch.float32) triton_poi_fused_convolution_reflection_pad2d_relu_13[grid(1115136)]( buf28, primals_25, buf29, 1115136, XBLOCK=1024, num_warps=4, num_stages=1) buf30 = extern_kernels.convolution(buf29, primals_26, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf31 = buf30 del buf30 triton_poi_fused_convolution_14[grid(49152)](buf31, primals_27, 49152, XBLOCK=512, num_warps=4, num_stages=1) del primals_27 buf32 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_15[grid(1048576)]( buf28, primals_25, buf32, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del buf28 del primals_25 buf33 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool) 
triton_poi_fused_convolution_relu_threshold_backward_16[grid(262144)]( buf24, primals_23, buf33, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del buf24 del primals_23 buf34 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_17[grid(524288)]( buf22, primals_21, buf34, 524288, XBLOCK=512, num_warps=8, num_stages=1) del buf22 del primals_21 buf35 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_18[grid(131072)]( buf19, primals_19, buf35, 131072, XBLOCK=512, num_warps=8, num_stages=1) del buf19 del primals_19 buf36 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_19[grid(262144)]( buf17, primals_17, buf36, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del buf17 del primals_17 buf37 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_19[grid(262144)]( buf15, primals_15, buf37, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del buf15 del primals_15 buf38 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_19[grid(262144)]( buf13, primals_13, buf38, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del buf13 del primals_13 buf39 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .bool) triton_poi_fused_convolution_relu_threshold_backward_20[grid(65536)]( buf10, primals_11, buf39, 65536, XBLOCK=512, num_warps=4, num_stages=1) del buf10 del primals_11 buf40 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch .bool) triton_poi_fused_convolution_relu_threshold_backward_21[grid(131072)]( buf8, primals_9, buf40, 131072, XBLOCK=512, num_warps=8, num_stages=1) del buf8 del primals_9 buf41 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch .bool) triton_poi_fused_convolution_relu_threshold_backward_21[grid(131072)]( buf6, primals_7, buf41, 131072, XBLOCK=512, num_warps=8, num_stages=1) del buf6 del primals_7 buf42 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch .bool) triton_poi_fused_convolution_relu_threshold_backward_21[grid(131072)]( buf4, primals_5, buf42, 131072, XBLOCK=512, num_warps=8, num_stages=1) del buf4 del primals_5 buf43 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool ) triton_poi_fused_convolution_relu_threshold_backward_22[grid(32768)]( buf1, primals_3, buf43, 32768, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_3 return (buf31, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, buf0, buf2, buf3, buf5, buf7, buf9, buf11, buf12, buf14, buf16, buf18, buf20, buf21, buf23, buf25, buf26, buf27, buf29, buf32, buf33, buf34, buf35, buf36, buf37, buf38, buf39, buf40, buf41, buf42, buf43) class decoder5New(nn.Module): def __init__(self): super(decoder5New, self).__init__() self.reflecPad15 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv15 = nn.Conv2d(512, 512, 3, 1, 0) self.relu15 = nn.ReLU(inplace=True) self.unpool = nn.UpsamplingNearest2d(scale_factor=2) self.reflecPad16 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv16 = nn.Conv2d(512, 512, 3, 1, 0) self.relu16 = nn.ReLU(inplace=True) self.reflecPad17 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv17 = nn.Conv2d(512, 512, 3, 1, 0) self.relu17 = nn.ReLU(inplace=True) self.reflecPad18 = nn.ReflectionPad2d((1, 
1, 1, 1)) self.conv18 = nn.Conv2d(512, 512, 3, 1, 0) self.relu18 = nn.ReLU(inplace=True) self.reflecPad19 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv19 = nn.Conv2d(512, 256, 3, 1, 0) self.relu19 = nn.ReLU(inplace=True) self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2) self.reflecPad20 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv20 = nn.Conv2d(256, 256, 3, 1, 0) self.relu20 = nn.ReLU(inplace=True) self.reflecPad21 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv21 = nn.Conv2d(256, 256, 3, 1, 0) self.relu21 = nn.ReLU(inplace=True) self.reflecPad22 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv22 = nn.Conv2d(256, 256, 3, 1, 0) self.relu22 = nn.ReLU(inplace=True) self.reflecPad23 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv23 = nn.Conv2d(256, 128, 3, 1, 0) self.relu23 = nn.ReLU(inplace=True) self.unpool3 = nn.UpsamplingNearest2d(scale_factor=2) self.reflecPad24 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv24 = nn.Conv2d(128, 128, 3, 1, 0) self.relu24 = nn.ReLU(inplace=True) self.reflecPad25 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv25 = nn.Conv2d(128, 64, 3, 1, 0) self.relu25 = nn.ReLU(inplace=True) self.unpool4 = nn.UpsamplingNearest2d(scale_factor=2) self.reflecPad26 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv26 = nn.Conv2d(64, 64, 3, 1, 0) self.relu26 = nn.ReLU(inplace=True) self.reflecPad27 = nn.ReflectionPad2d((1, 1, 1, 1)) self.conv27 = nn.Conv2d(64, 3, 3, 1, 0) def forward(self, input_0): primals_2 = self.conv15.weight primals_3 = self.conv15.bias primals_4 = self.conv16.weight primals_5 = self.conv16.bias primals_6 = self.conv17.weight primals_7 = self.conv17.bias primals_8 = self.conv18.weight primals_9 = self.conv18.bias primals_10 = self.conv19.weight primals_11 = self.conv19.bias primals_12 = self.conv20.weight primals_13 = self.conv20.bias primals_14 = self.conv21.weight primals_15 = self.conv21.bias primals_16 = self.conv22.weight primals_17 = self.conv22.bias primals_18 = self.conv23.weight primals_19 = self.conv23.bias primals_20 = self.conv24.weight primals_21 = self.conv24.bias primals_22 = self.conv25.weight primals_23 = self.conv25.bias primals_24 = self.conv26.weight primals_25 = self.conv26.bias primals_26 = self.conv27.weight primals_27 = self.conv27.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27]) return output[0]
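A minimal smoke test for this row, sketched under the assumption that the decoder5New class above is in scope and a CUDA device is available; the expected output shape follows from the four scale_factor=2 upsampling stages and the final 64-to-3-channel convolution.

import torch

# Four UpsamplingNearest2d stages double the spatial size each time
# (4 -> 8 -> 16 -> 32 -> 64), and conv27 maps 64 channels down to 3.
model = decoder5New().cuda()
x = torch.rand(4, 512, 4, 4, device='cuda')
y = model(x)
assert y.shape == (4, 3, 64, 64)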
czczup/URST
decoder5
false
15137
[ "Apache-2.0" ]
119
000ec9f7728f12ffad989ec1d07b1dd579514133
https://github.com/czczup/URST/tree/000ec9f7728f12ffad989ec1d07b1dd579514133
FPELU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/7q/c7q6ux6r2dwmh2y57dhscu5lsozvpyu4safs4lp4277ik7cvhfqw.py # Topologically Sorted Source Nodes: [elu], Original ATen: [aten.elu] # Source node to ATen node mapping: # elu => expm1, gt, mul, mul_1, mul_2, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_1,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 0.3652895587790944), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {}) triton_poi_fused_elu_0 = async_compile.triton('triton_poi_fused_elu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = 0.3652895587790944 tmp7 = tmp5 * tmp6 tmp8 = tl.where(tmp2, tmp4, tmp7) tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [elu], Original ATen: [aten.elu] stream0 = get_raw_stream(0) triton_poi_fused_elu_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import random
import torch
import torch.nn as nn


class FPELU(nn.Module):
    """
    Test for nn.functional types
    """

    def __init__(self):
        super(FPELU, self).__init__()
        self.alpha = random.random()

    def forward(self, x):
        from torch.nn import functional as F
        return F.elu(x, alpha=self.alpha)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import random
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 1.0
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.expm1(tmp4)
    tmp6 = 0.3652895587790944
    tmp7 = tmp5 * tmp6
    tmp8 = tl.where(tmp2, tmp4, tmp7)
    tl.store(out_ptr0 + x0, tmp8, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_elu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class FPELUNew(nn.Module):
    """
    Test for nn.functional types
    """

    def __init__(self):
        super(FPELUNew, self).__init__()
        self.alpha = random.random()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
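Note that call() bakes the traced alpha into the kernel as the literal 0.3652895587790944, so the alpha attribute on FPELUNew is never read. A minimal sanity check, assuming FPELUNew is in scope and a CUDA device is available (the tolerance is an illustrative choice):

import torch
from torch.nn import functional as F

ALPHA = 0.3652895587790944  # constant hardcoded in triton_poi_fused_elu_0

# randn (rather than the dataset's rand) also exercises the x <= 0 branch.
x = torch.randn(4, 4, 4, 4, device='cuda')
out = FPELUNew()(x)
assert torch.allclose(out, F.elu(x, alpha=ALPHA), atol=1e-6)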
dawnclaude/onnx2keras
FPELU
false
15138
[ "MIT" ]
115
3d2a47c0a228b91fd434232274e216e491da36e3
https://github.com/dawnclaude/onnx2keras/tree/3d2a47c0a228b91fd434232274e216e491da36e3
FLeakyReLU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/gf/cgfzgiee7tfgul6hellmq4doyry3m3rv536dxz3f7byigv2z724r.py # Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # leaky_relu => gt, mul, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.35897364545817423), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %mul), kwargs = {}) triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.35897364545817423 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import random
import torch
import torch.nn as nn


class FLeakyReLU(nn.Module):
    """
    Test for nn.functional types
    """

    def __init__(self):
        super(FLeakyReLU, self).__init__()
        self.negative_slope = random.random()

    def forward(self, x):
        from torch.nn import functional as F
        return F.leaky_relu(x, self.negative_slope)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import random
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 0.35897364545817423
    tmp4 = tmp0 * tmp3
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tl.store(out_ptr0 + x0, tmp5, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_leaky_relu_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class FLeakyReLUNew(nn.Module):
    """
    Test for nn.functional types
    """

    def __init__(self):
        super(FLeakyReLUNew, self).__init__()
        self.negative_slope = random.random()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
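As in the FPELU row, the randomly drawn negative_slope was frozen into the kernel at trace time as 0.35897364545817423, so a correctness check must compare against that constant rather than a fresh random.random() draw. A sketch under the same assumptions (FLeakyReLUNew in scope, CUDA available):

import torch
from torch.nn import functional as F

SLOPE = 0.35897364545817423  # constant hardcoded in triton_poi_fused_leaky_relu_0

x = torch.randn(4, 4, 4, 4, device='cuda')  # negative inputs hit the slope branch
out = FLeakyReLUNew()(x)
assert torch.allclose(out, F.leaky_relu(x, SLOPE))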
dawnclaude/onnx2keras
FLeakyReLU
false
15139
[ "MIT" ]
115
3d2a47c0a228b91fd434232274e216e491da36e3
https://github.com/dawnclaude/onnx2keras/tree/3d2a47c0a228b91fd434232274e216e491da36e3
FSELU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ss/cssb3bkcvpgpfmvdjd4tw2xo5hkmrpohzyqtlslf2ke6cgvhlafb.py # Topologically Sorted Source Nodes: [selu], Original ATen: [aten.elu] # Source node to ATen node mapping: # selu => expm1, gt, mul, mul_1, mul_2, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.0507009873554805), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_1,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.7580993408473766), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {}) triton_poi_fused_elu_0 = async_compile.triton('triton_poi_fused_elu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': 
False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0507009873554805 tmp4 = tmp0 * tmp3 tmp5 = 1.0 tmp6 = tmp0 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = 1.7580993408473766 tmp9 = tmp7 * tmp8 tmp10 = tl.where(tmp2, tmp4, tmp9) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [selu], Original ATen: [aten.elu] stream0 = get_raw_stream(0) triton_poi_fused_elu_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class FSELU(nn.Module):
    """
    Test for nn.functional types
    """

    def __init__(self):
        super(FSELU, self).__init__()

    def forward(self, x):
        from torch.nn import functional as F
        return F.selu(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 1.0507009873554805
    tmp4 = tmp0 * tmp3
    tmp5 = 1.0
    tmp6 = tmp0 * tmp5
    tmp7 = libdevice.expm1(tmp6)
    tmp8 = 1.7580993408473766
    tmp9 = tmp7 * tmp8
    tmp10 = tl.where(tmp2, tmp4, tmp9)
    tl.store(out_ptr0 + x0, tmp10, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_elu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class FSELUNew(nn.Module):
    """
    Test for nn.functional types
    """

    def __init__(self):
        super(FSELUNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
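F.selu takes no free parameters, so the compiled wrapper can be checked against eager PyTorch directly; the kernel's two literals are the standard SELU scale (1.0507009873554805) and scale times alpha (1.7580993408473766) folded into the expm1 branch. A sketch, assuming FSELUNew is in scope and a CUDA device is available:

import torch
from torch.nn import functional as F

x = torch.randn(4, 4, 4, 4, device='cuda')
out = FSELUNew()(x)
assert torch.allclose(out, F.selu(x), atol=1e-6)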
dawnclaude/onnx2keras
FSELU
false
15140
[ "MIT" ]
115
3d2a47c0a228b91fd434232274e216e491da36e3
https://github.com/dawnclaude/onnx2keras/tree/3d2a47c0a228b91fd434232274e216e491da36e3
FThreshold
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/si/csia475wjzknpvzqlx5amyruagabvcdepgqgy6lqdsxs4bhttxts.py # Topologically Sorted Source Nodes: [result], Original ATen: [aten.threshold] # Source node to ATen node mapping: # result => full_default, le, where # Graph fragment: # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%arg0_1, 0.6282412823415753), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.1926536560058594), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%le, %full_default, %arg0_1), kwargs = {}) triton_poi_fused_threshold_0 = async_compile.triton('triton_poi_fused_threshold_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_threshold_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_threshold_0(in_ptr0, out_ptr0, xnumel, XBLOCK 
: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.6282412823415753 tmp2 = tmp0 <= tmp1 tmp3 = 1.1926536560058594 tmp4 = tl.where(tmp2, tmp3, tmp0) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [result], Original ATen: [aten.threshold] stream0 = get_raw_stream(0) triton_poi_fused_threshold_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import random import torch import torch.nn as nn class FThreshold(nn.Module): """ Test for nn.functional types """ def __init__(self): super(FThreshold, self).__init__() self.threshold = random.random() self.value = self.threshold + random.random() def forward(self, x): from torch.nn import functional as F return F.threshold(x, threshold=self.threshold, value=self.value) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import random import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_threshold_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.6282412823415753 tmp2 = tmp0 <= tmp1 tmp3 = 1.1926536560058594 tmp4 = tl.where(tmp2, tmp3, tmp0) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_threshold_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 return buf0, class FThresholdNew(nn.Module): """ Test for nn.functional types """ def __init__(self): super(FThresholdNew, self).__init__() self.threshold = random.random() self.value = self.threshold + random.random() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
dawnclaude/onnx2keras
FThreshold
false
15,141
[ "MIT" ]
115
3d2a47c0a228b91fd434232274e216e491da36e3
https://github.com/dawnclaude/onnx2keras/tree/3d2a47c0a228b91fd434232274e216e491da36e3
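A subtlety worth checking in this record: the compiled kernel bakes in the threshold (0.6282412823415753) and fill value (1.1926536560058594) that random.random() produced at trace time, so a freshly constructed FThresholdNew draws new values for self.threshold and self.value that call() never reads. A minimal parity sketch, assuming the classes above are importable in the same session and a CUDA device is available:

import torch

# Constants captured inside triton_poi_fused_threshold_0 at compile time,
# not freshly drawn random values.
BAKED_THRESHOLD = 0.6282412823415753
BAKED_VALUE = 1.1926536560058594

x = torch.rand(4, 4, 4, 4, device='cuda')  # must match the asserted shape/strides
ref = torch.where(x <= BAKED_THRESHOLD, torch.full_like(x, BAKED_VALUE), x)
out = FThresholdNew()(x)  # the instance's own re-randomized threshold is unused
assert torch.allclose(out, ref)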
LayerLeakyReLU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/sw/csww2hq2t5tvc6ztniqa2zwrucmdhit43p6dlv7kxqhf35fy3xdq.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x => gt, mul, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.8161861224591765), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %mul), kwargs = {}) triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex 
< xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.8161861224591765 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import random import torch import torch.nn as nn class LayerLeakyReLU(nn.Module): """ Test for nn.layers based types """ def __init__(self): super(LayerLeakyReLU, self).__init__() self.negative_slope = random.random() self.leaky_relu = nn.LeakyReLU(negative_slope=self.negative_slope) def forward(self, x): x = self.leaky_relu(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import random import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.8161861224591765 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 return buf0, class LayerLeakyReLUNew(nn.Module): """ Test for nn.layers based types """ def __init__(self): super(LayerLeakyReLUNew, self).__init__() self.negative_slope = random.random() self.leaky_relu = nn.LeakyReLU(negative_slope=self.negative_slope) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
dawnclaude/onnx2keras
LayerLeakyReLU
false
15,142
[ "MIT" ]
115
3d2a47c0a228b91fd434232274e216e491da36e3
https://github.com/dawnclaude/onnx2keras/tree/3d2a47c0a228b91fd434232274e216e491da36e3
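The same caveat applies to this leaky-ReLU record: the negative slope 0.8161861224591765 is frozen into the kernel at trace time. A parity sketch against the eager functional form, again assuming CUDA and that LayerLeakyReLUNew is in scope:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4, device='cuda')
# Use the slope captured in triton_poi_fused_leaky_relu_0, not a new random one.
ref = F.leaky_relu(x, negative_slope=0.8161861224591765)
out = LayerLeakyReLUNew()(x)
assert torch.allclose(out, ref)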
MultiHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/rh/crhy6nilvaajphuuoyup37xl4ncuiyrcb3fnt5aboux6wyvcg7ie.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = 
xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask) tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/qx/cqxjurwudxpwacfjyi5uacrhtvygta2tsfwdpfasfw2mllpzyigo.py # Topologically Sorted Source Nodes: [scores_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # scores_1 => div_1, exp, sum_1 # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[256, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 256 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float("-inf")) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tmp7 = tmp2 - tmp6 
tmp8 = tmp7 * tmp1 tmp9 = tl_math.exp(tmp8) tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.where(xmask, tmp10, 0) tmp13 = tl.sum(tmp12, 1)[:, None] tmp14 = tmp9 / tmp13 tl.store(out_ptr2 + (r1 + (16*x0)), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ii/ciintdowljclmge7hoflnsg4o3754vwawhhvbc7yvv4pfyol3ce3.py # Topologically Sorted Source Nodes: [sum_1, att_weights], Original ATen: [aten.sum, aten.div] # Source node to ATen node mapping: # att_weights => div_2 # sum_1 => sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%slice_2, [1]), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 2), kwargs = {}) triton_poi_fused_div_sum_2 = async_compile.triton('triton_poi_fused_div_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 256 x1 = (xindex // 256) x2 = xindex tmp0 = tl.load(in_ptr0 + (512 + x0 + (1024*x1)), xmask) tmp1 = tl.load(in_ptr0 + (768 + x0 + (1024*x1)), xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/2s/c2s3zo6qtbodb6bdwv46ozxj4nxxymp76igm7emvdafvrj3673sn.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = (yindex // 16) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf1, primals_6, buf3, 16, 16, grid=grid(16, 16), stream=stream0) del 
primals_6 buf4 = reinterpret_tensor(buf1, (4, 4, 1, 16), (64, 16, 16, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf0, primals_3, buf4, 16, 16, grid=grid(16, 16), stream=stream0) del primals_3 buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5) buf8 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [scores_1], Original ATen: [aten._softmax] triton_per_fused__softmax_1.run(buf5, buf8, 256, 16, grid=grid(256), stream=stream0) del buf5 buf9 = reinterpret_tensor(buf0, (4, 4, 16, 1), (64, 16, 1, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf2, primals_8, buf9, 16, 16, grid=grid(16, 16), stream=stream0) del primals_8 buf10 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf8, (16, 16, 16), (256, 16, 1), 0), reinterpret_tensor(buf9, (16, 16, 1), (16, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [sum_1, att_weights], Original ATen: [aten.sum, aten.div] triton_poi_fused_div_sum_2.run(buf8, buf11, 1024, grid=grid(1024), stream=stream0) buf12 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf10, buf12, 64, 4, grid=grid(64, 4), stream=stream0) buf13 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, reinterpret_tensor(buf12, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_11 return (reinterpret_tensor(buf13, (4, 16, 4), (64, 4, 1), 0), buf11, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), buf8, reinterpret_tensor(buf12, (64, 4), (4, 1), 0), primals_10, reinterpret_tensor(buf9, (16, 1, 16), (16, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_10 = 
rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn as nn import torch.nn.functional as F def attention(q, k, v, d_k, mask=None, dropout=None): """ :param q: queries, B x N_HEADS x seq_len x d_k :param k: keys, same dim as q :param v: values, same dim as q :param d_k: d_model/n_heads = 128/8 = 16 :param mask: mask for padding and future steps in the scores! :param dropout: dropout layer if any :return: attention vector of shape B x N_HEADS x seq_len x d_k """ scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k) if mask is not None: mask = mask.unsqueeze(1) scores = scores.masked_fill(mask == 0, -1000000000.0) scores = F.softmax(scores, dim=-1) if dropout is not None: None scores = dropout(scores) output = torch.matmul(scores, v) return output, scores class MultiHeadAttention(nn.Module): def __init__(self, heads, d_model, dropout=0.0): super().__init__() self.d_model = d_model self.d_k = d_model // heads self.h = heads self.q_linear = nn.Linear(d_model, d_model) self.v_linear = nn.Linear(d_model, d_model) self.k_linear = nn.Linear(d_model, d_model) self.dropout = nn.Dropout(dropout) if dropout > 0 else None self.out = nn.Linear(d_model, d_model) def forward(self, q, k, v, mask=None): bs = q.size(0) k = self.k_linear(k).view(bs, -1, self.h, self.d_k) q = self.q_linear(q).view(bs, -1, self.h, self.d_k) v = self.v_linear(v).view(bs, -1, self.h, self.d_k) k = k.transpose(1, 2) q = q.transpose(1, 2) v = v.transpose(1, 2) att_output, att_weights = attention(q, k, v, self.d_k, mask, self. dropout) att_weights = att_weights.detach()[:, -2:].sum(dim=1) / 2 concat = att_output.transpose(1, 2).contiguous().view(bs, -1, self. d_model) output = self.out(concat) return output, att_weights def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'heads': 4, 'd_model': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 256 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float('-inf')) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tmp7 = tmp2 - tmp6 tmp8 = tmp7 * tmp1 tmp9 = tl_math.exp(tmp8) tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.where(xmask, tmp10, 0) tmp13 = tl.sum(tmp12, 1)[:, None] tmp14 = tmp9 / tmp13 tl.store(out_ptr2 + (r1 + 16 * x0), tmp14, xmask) @triton.jit def triton_poi_fused_div_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 256 x1 = xindex // 256 x2 = xindex tmp0 = tl.load(in_ptr0 + (512 + x0 + 1024 * x1), xmask) tmp1 = tl.load(in_ptr0 + (768 + x0 + 1024 * x1), xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 16)](buf1, primals_6, buf3, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_6 buf4 = reinterpret_tensor(buf1, (4, 4, 1, 16), (64, 16, 16, 1), 0) del buf1 triton_poi_fused_clone_0[grid(16, 16)](buf0, primals_3, buf4, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5) buf8 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch .float32) triton_per_fused__softmax_1[grid(256)](buf5, buf8, 256, 16, XBLOCK= 8, num_warps=2, num_stages=1) del buf5 buf9 = reinterpret_tensor(buf0, (4, 4, 16, 1), (64, 16, 1, 1), 0) del buf0 triton_poi_fused_clone_0[grid(16, 16)](buf2, primals_8, buf9, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_8 buf10 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf8, (16, 16, 16), (256, 16, 1), 0), reinterpret_tensor(buf9, (16, 16, 1), (16, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) triton_poi_fused_div_sum_2[grid(1024)](buf8, buf11, 1024, XBLOCK= 128, num_warps=4, num_stages=1) buf12 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32) triton_poi_fused_clone_3[grid(64, 4)](buf10, buf12, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf13 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0) del buf10 extern_kernels.addmm(primals_11, reinterpret_tensor(buf12, 
(64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_11 return reinterpret_tensor(buf13, (4, 16, 4), (64, 4, 1), 0 ), buf11, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0 ), buf8, reinterpret_tensor(buf12, (64, 4), (4, 1), 0 ), primals_10, reinterpret_tensor(buf9, (16, 1, 16), (16, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0) def attention(q, k, v, d_k, mask=None, dropout=None): """ :param q: queries, B x N_HEADS x seq_len x d_k :param k: keys, same dim as q :param v: values, same dim as q :param d_k: d_model/n_heads = 128/8 = 16 :param mask: mask for padding and future steps in the scores! :param dropout: dropout layer if any :return: attention vector of shape B x N_HEADS x seq_len x d_k """ scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k) if mask is not None: mask = mask.unsqueeze(1) scores = scores.masked_fill(mask == 0, -1000000000.0) scores = F.softmax(scores, dim=-1) if dropout is not None: None scores = dropout(scores) output = torch.matmul(scores, v) return output, scores class MultiHeadAttentionNew(nn.Module): def __init__(self, heads, d_model, dropout=0.0): super().__init__() self.d_model = d_model self.d_k = d_model // heads self.h = heads self.q_linear = nn.Linear(d_model, d_model) self.v_linear = nn.Linear(d_model, d_model) self.k_linear = nn.Linear(d_model, d_model) self.dropout = nn.Dropout(dropout) if dropout > 0 else None self.out = nn.Linear(d_model, d_model) def forward(self, input_0, input_1, input_2): primals_2 = self.q_linear.weight primals_3 = self.q_linear.bias primals_5 = self.v_linear.weight primals_6 = self.v_linear.bias primals_7 = self.k_linear.weight primals_8 = self.k_linear.bias primals_10 = self.out.weight primals_11 = self.out.bias primals_1 = input_0 primals_4 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0], output[1]
davide-belli/generative-graph-transformer
MultiHeadAttention
false
15,143
[ "MIT" ]
51
949aacf57246e8c28df7dfa38e5c59bf8b2b0ee8
https://github.com/davide-belli/generative-graph-transformer/tree/949aacf57246e8c28df7dfa38e5c59bf8b2b0ee8
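Unlike the stateless records, MultiHeadAttentionNew carries real nn.Linear parameters, so a parity check has to align the weights first; note that the bare `None` statement inside attention() is a no-op left over from a stripped debug print and does not affect the computation. A sketch of the check, assuming both classes are in scope and a CUDA device is available:

import torch

torch.manual_seed(0)
eager = MultiHeadAttention(heads=4, d_model=4).cuda()
fused = MultiHeadAttentionNew(heads=4, d_model=4).cuda()
fused.load_state_dict(eager.state_dict())  # identical submodule names, so keys line up

q, k, v = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
out_e, w_e = eager(q, k, v)   # output: (4, 16, 4), att_weights: (4, 16, 16)
out_f, w_f = fused(q, k, v)
assert torch.allclose(out_e, out_f, atol=1e-5)
assert torch.allclose(w_e, w_f, atol=1e-5)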
FSub
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ti/ctiwtlkzafjwmem2faxm3nrewvafm35hnwxouu2udzs6lo3fx6gs.py # Topologically Sorted Source Nodes: [sub, x], Original ATen: [aten.sub] # Source node to ATen node mapping: # sub => sub # x => sub_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, 8.3), kwargs = {}) triton_poi_fused_sub_0 = async_compile.triton('triton_poi_fused_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = tmp0 - tmp1 tmp3 = 8.3 
tmp4 = tmp2 - tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, x], Original ATen: [aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_sub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class FSub(nn.Module): def __init__(self): super(FSub, self).__init__() def forward(self, x, y): x = x - y - 8.3 return x def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 - tmp1 tmp3 = 8.3 tmp4 = tmp2 - tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class FSubNew(nn.Module): def __init__(self): super(FSubNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
dawnclaude/onnx2keras
FSub
false
15,144
[ "MIT" ]
115
3d2a47c0a228b91fd434232274e216e491da36e3
https://github.com/dawnclaude/onnx2keras/tree/3d2a47c0a228b91fd434232274e216e491da36e3
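For an elementwise kernel like this one, grid(256) with XBLOCK=128 launches two 128-lane programs, and the xmask guard keeps the same kernel correct when xnumel is not a multiple of XBLOCK. A quick equivalence sketch, CUDA assumed:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
y = torch.rand(4, 4, 4, 4, device='cuda')
# Both subtractions are fused into a single kernel pass over 256 elements.
assert torch.allclose(FSubNew()(x, y), x - y - 8.3)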
FFloorTest
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/c6/cc6vgxovztdx2wjzbxgaurezngk42rocush5evm6ny272baozkqb.py # Topologically Sorted Source Nodes: [floor], Original ATen: [aten.floor] # Source node to ATen node mapping: # floor => floor # Graph fragment: # %floor : [num_users=1] = call_function[target=torch.ops.aten.floor.default](args = (%arg0_1,), kwargs = {}) triton_poi_fused_floor_0 = async_compile.triton('triton_poi_fused_floor_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_floor_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_floor_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = libdevice.floor(tmp0) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() 
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [floor], Original ATen: [aten.floor] stream0 = get_raw_stream(0) triton_poi_fused_floor_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class FFloorTest(nn.Module): """ Test for nn.functional types """ def __init__(self): super(FFloorTest, self).__init__() def forward(self, x): return x.floor() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_floor_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.floor(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_floor_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class FFloorTestNew(nn.Module): """ Test for nn.functional types """ def __init__(self): super(FFloorTestNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
dawnclaude/onnx2keras
FFloorTest
false
15,145
[ "MIT" ]
115
3d2a47c0a228b91fd434232274e216e491da36e3
https://github.com/dawnclaude/onnx2keras/tree/3d2a47c0a228b91fd434232274e216e491da36e3
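Here floor lowers to libdevice.floor, which should be exact on finite fp32 values, so the compiled output can be compared against torch.floor elementwise. A sketch, assuming CUDA; the input is shifted so negative values are exercised too:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda') * 10 - 5  # covers negative values
assert torch.equal(FFloorTestNew()(x), torch.floor(x))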
FTanh
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/dn/cdnhr6ixjduuhci57kobqjnehjrl22mcyjqzuuhvtxxshy437diy.py # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] # Source node to ATen node mapping: # tanh => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%arg0_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, 
(4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class FTanh(nn.Module): """ Test for nn.functional types """ def __init__(self): super(FTanh, self).__init__() def forward(self, x): from torch.nn import functional as F return F.tanh(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class FTanhNew(nn.Module): """ Test for nn.functional types """ def __init__(self): super(FTanhNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
dawnclaude/onnx2keras
FTanh
false
15146
[ "MIT" ]
115
3d2a47c0a228b91fd434232274e216e491da36e3
https://github.com/dawnclaude/onnx2keras/tree/3d2a47c0a228b91fd434232274e216e491da36e3
LayerTanh
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/dn/cdnhr6ixjduuhci57kobqjnehjrl22mcyjqzuuhvtxxshy437diy.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%arg0_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 
4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class LayerTanh(nn.Module): """ Test for nn.layers based types """ def __init__(self): super(LayerTanh, self).__init__() self.tanh = nn.Tanh() def forward(self, x): x = self.tanh(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class LayerTanhNew(nn.Module): """ Test for nn.layers based types """ def __init__(self): super(LayerTanhNew, self).__init__() self.tanh = nn.Tanh() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
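The launch triton_poi_fused_tanh_0[grid(256)](..., 256, XBLOCK=128, ...) resolves to ceil(256 / 128) = 2 program instances, and the xmask = xindex < xnumel guard keeps loads and stores in bounds whenever xnumel is not a multiple of XBLOCK. A sketch of the grid arithmetic (standard Triton helper, illustrative values):

import triton

xnumel = 4 * 4 * 4 * 4                       # 256 elements in the (4, 4, 4, 4) input
XBLOCK = 128                                 # elements handled per program instance
num_programs = triton.cdiv(xnumel, XBLOCK)   # ceil division, as grid(256) computes at launch
assert num_programs == 2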
dawnclaude/onnx2keras
LayerTanh
false
15147
[ "MIT" ]
115
3d2a47c0a228b91fd434232274e216e491da36e3
https://github.com/dawnclaude/onnx2keras/tree/3d2a47c0a228b91fd434232274e216e491da36e3
PointNetfeat
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/5c/c5cw2xmbzofx2wdu7faeg54uzihiemmoux3htg4322kfvw6qkdm2.py # Topologically Sorted Source Nodes: [conv1d, group_norm], Original ATen: [aten.convolution, aten.native_group_norm] # Source node to ATen node mapping: # conv1d => convolution # group_norm => add, rsqrt, var_mean # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_per_fused_convolution_native_group_norm_0 = async_compile.triton('triton_per_fused_convolution_native_group_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_convolution_native_group_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_convolution_native_group_norm_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): xnumel = 64 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r5 = rindex x4 = xindex r3 = (rindex // 64) x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + (r5 + (256*x4)), None) tmp1 = tl.load(in_ptr0 + (r3 + (4*x0)), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 256, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(in_out_ptr0 + (r5 + (256*x4)), tmp2, None) tl.store(out_ptr2 + (x4), tmp20, None) tl.store(out_ptr0 + (x4), tmp10, None) tl.store(out_ptr1 + (x4), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/65/c65ln4pxuzp4cqqq2ra4bgqqo7vfjki5wco54uipxkwpkjnbi7ts.py # Topologically Sorted Source Nodes: [group_norm, x, cat], Original ATen: [aten.native_group_norm, aten.relu, aten.cat] # Source node to ATen node mapping: # cat => cat # group_norm => add_1, mul_1 # x => relu # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_3), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_1), kwargs = {}) # %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {}) # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%repeat, %relu], 1), kwargs = {}) triton_poi_fused_cat_native_group_norm_relu_1 = async_compile.triton('triton_poi_fused_cat_native_group_norm_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_native_group_norm_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_native_group_norm_relu_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x4 = xindex x5 = (xindex // 64) x1 = (xindex // 64) % 64 x2 = (xindex // 4096) x3 = xindex % 4096 tmp0 = tl.load(in_ptr0 + (x4), None) tmp1 = tl.load(in_ptr1 + ((x5 // 4)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((x5 // 4)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x1), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 256.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x4), tmp15, None) tl.store(out_ptr1 + (x3 + (69632*x2)), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/4o/c4ottg4gxav5pkxjayveop5zkvhcpuy4q4jen6ouxlngvwyh7d5p.py # Topologically Sorted Source Nodes: [conv1d_1, group_norm_1], Original ATen: [aten.convolution, aten.native_group_norm] # Source node to ATen node mapping: # conv1d_1 => convolution_1 # group_norm_1 => add_2, rsqrt_1, var_mean_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_6, %primals_7, [1], [0], [1], False, [0], 1), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) triton_per_fused_convolution_native_group_norm_2 = async_compile.triton('triton_per_fused_convolution_native_group_norm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 512], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_convolution_native_group_norm_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 
'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_convolution_native_group_norm_2(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): xnumel = 64 XBLOCK: tl.constexpr = 1 rnumel = 512 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r5 = rindex x4 = xindex r3 = (rindex // 64) x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + (r5 + (512*x4)), None) tmp1 = tl.load(in_ptr0 + (r3 + (8*x0)), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 512, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 512.0 tmp17 = tmp15 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(in_out_ptr0 + (r5 + (512*x4)), tmp2, None) tl.store(out_ptr2 + (x4), tmp20, None) tl.store(out_ptr0 + (x4), tmp10, None) tl.store(out_ptr1 + (x4), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/mv/cmvcn7otv4mav4bmlybbbrbdq4go5hyvksjgkuj7cp4mqaaqepar.py # Topologically Sorted Source Nodes: [group_norm_1, x_1], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # group_norm_1 => add_3, mul_3 # x_1 => relu_1 # Graph fragment: # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %unsqueeze_7), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %unsqueeze_5), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_3,), kwargs = {}) triton_poi_fused_native_group_norm_relu_3 = async_compile.triton('triton_poi_fused_native_group_norm_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x4 = (xindex // 64) x1 = (xindex // 64) % 128 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + ((x4 // 8)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((x4 // 8)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x1), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 512.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/o6/co64zvpirij64d5dxlcfb3ruosxirrgdffp2oyexad5qwlnot3j4.py # Topologically Sorted Source Nodes: [conv1d_2, x_2], Original ATen: [aten.convolution, aten.native_group_norm] # Source node to ATen node mapping: # conv1d_2 => convolution_2 # x_2 => add_4, rsqrt_2, var_mean_2 # Graph fragment: # %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_10, %primals_11, [1], [0], [1], False, [0], 1), kwargs = {}) # %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_4, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {}) # %rsqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) triton_red_fused_convolution_native_group_norm_4 = async_compile.triton('triton_red_fused_convolution_native_group_norm_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[64, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_convolution_native_group_norm_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_convolution_native_group_norm_4(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 64 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x4 = xindex x0 = xindex % 16 tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r5 = rindex r3 = (rindex // 64) tmp0 = tl.load(in_out_ptr0 + (r5 + (4096*x4)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + (r3 + (64*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = triton_helpers.welford_reduce( tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0 ) tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean) tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2) tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight) tl.store(in_out_ptr0 + (r5 + (4096*x4)), tmp2, rmask & xmask) tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford( tmp4_mean, tmp4_m2, tmp4_weight, 1 ) tmp4 = tmp4_tmp[:, None] tmp5 = tmp5_tmp[:, None] tmp6 = tmp6_tmp[:, None] tl.store(out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr1 + (x4), tmp5, xmask) tmp7 = 4096.0 tmp8 = tmp5 / tmp7 tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = libdevice.rsqrt(tmp10) tl.store(out_ptr2 + (x4), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/jl/cjlaotxt2uy7v6hwhqyry7s5ccxqdfzwpozffyisoxu3ugaxggtk.py # Topologically Sorted Source Nodes: [x_2, max_1, x_4], Original ATen: [aten.native_group_norm, aten.max, aten.repeat] # Source node to ATen node mapping: # max_1 => max_1 # x_2 => add_5, mul_5 # x_4 => repeat # Graph fragment: # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, %unsqueeze_11), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %unsqueeze_9), kwargs = {}) # %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%add_5, 2, True), kwargs = {}) # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%view_6, [1, 1, 64]), kwargs = {}) triton_per_fused_max_native_group_norm_repeat_5 = async_compile.triton('triton_per_fused_max_native_group_norm_repeat_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4096, 64], reduction_hint=ReductionHint.DEFAULT, 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_native_group_norm_repeat_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_max_native_group_norm_repeat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4096 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 1024 x1 = (xindex // 1024) tmp0 = tl.load(in_ptr0 + (r2 + (64*x3)), None) tmp1 = tl.load(in_ptr1 + ((x3 // 64)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((x3 // 64)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 4096.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = triton_helpers.max2(tmp14, 1)[:, None] tmp18 = tl.broadcast_to(rindex, tmp14.shape) _, tmp17_tmp = triton_helpers.max_with_index(tmp14, tmp18, 1) tmp17 = tmp17_tmp[:, None] tl.store(out_ptr2 + (r2 + (64*x0) + (69632*x1)), tmp16, None) tl.store(out_ptr1 + (x3), tmp17, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args args.clear() assert_size_stride(primals_1, (4, 3, 64), (192, 64, 1)) assert_size_stride(primals_2, (64, 3, 1), (3, 1, 1)) assert_size_stride(primals_3, (64, ), (1, )) assert_size_stride(primals_4, (64, ), (1, )) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (128, 64, 1), (64, 1, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (128, ), (1, )) assert_size_stride(primals_9, (128, ), (1, )) assert_size_stride(primals_10, (1024, 128, 1), (128, 1, 1)) assert_size_stride(primals_11, (1024, ), (1, )) assert_size_stride(primals_12, (1024, ), (1, )) assert_size_stride(primals_13, (1024, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(0,), dilation=(1,), 
transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64), (4096, 64, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.float32) buf3 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.float32) buf5 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.float32) # Topologically Sorted Source Nodes: [conv1d, group_norm], Original ATen: [aten.convolution, aten.native_group_norm] stream0 = get_raw_stream(0) triton_per_fused_convolution_native_group_norm_0.run(buf1, primals_3, buf2, buf3, buf5, 64, 256, grid=grid(64), stream=stream0) del primals_3 buf6 = empty_strided_cuda((4, 64, 64), (4096, 64, 1), torch.float32) buf24 = empty_strided_cuda((4, 1088, 64), (69632, 64, 1), torch.float32) buf23 = reinterpret_tensor(buf24, (4, 64, 64), (69632, 64, 1), 65536) # alias # Topologically Sorted Source Nodes: [group_norm, x, cat], Original ATen: [aten.native_group_norm, aten.relu, aten.cat] triton_poi_fused_cat_native_group_norm_relu_1.run(buf1, buf2, buf3, primals_4, primals_5, buf6, buf23, 16384, grid=grid(16384), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf7, (4, 128, 64), (8192, 64, 1)) buf8 = buf7; del buf7 # reuse buf9 = buf3; del buf3 # reuse buf10 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.float32) buf12 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.float32) # Topologically Sorted Source Nodes: [conv1d_1, group_norm_1], Original ATen: [aten.convolution, aten.native_group_norm] triton_per_fused_convolution_native_group_norm_2.run(buf8, primals_7, buf9, buf10, buf12, 64, 512, grid=grid(64), stream=stream0) del primals_7 buf13 = empty_strided_cuda((4, 128, 64), (8192, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [group_norm_1, x_1], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_3.run(buf8, buf9, buf10, primals_8, primals_9, buf13, 32768, grid=grid(32768), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [conv1d_2], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf14, (4, 1024, 64), (65536, 64, 1)) buf15 = buf14; del buf14 # reuse buf16 = buf10; del buf10 # reuse buf17 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.float32) buf19 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.float32) # Topologically Sorted Source Nodes: [conv1d_2, x_2], Original ATen: [aten.convolution, aten.native_group_norm] triton_red_fused_convolution_native_group_norm_4.run(buf15, primals_11, buf16, buf17, buf19, 64, 4096, grid=grid(64), stream=stream0) del primals_11 buf21 = empty_strided_cuda((4, 1024, 1), (1024, 1, 1), torch.int64) buf22 = reinterpret_tensor(buf24, (4, 1024, 64), (69632, 64, 1), 0) # alias # Topologically Sorted Source Nodes: [x_2, max_1, x_4], Original ATen: [aten.native_group_norm, aten.max, aten.repeat] triton_per_fused_max_native_group_norm_repeat_5.run(buf15, buf16, buf17, primals_12, primals_13, buf21, buf22, 4096, 64, grid=grid(4096), stream=stream0) del buf17 del primals_13 return (buf24, primals_1, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, buf1, 
reinterpret_tensor(buf2, (4, 16), (16, 1), 0), reinterpret_tensor(buf5, (4, 16), (16, 1), 0), buf6, buf8, reinterpret_tensor(buf9, (4, 16), (16, 1), 0), reinterpret_tensor(buf12, (4, 16), (16, 1), 0), buf13, buf15, reinterpret_tensor(buf16, (4, 16), (16, 1), 0), reinterpret_tensor(buf19, (4, 16), (16, 1), 0), buf21, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 3, 64), (192, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, 3, 1), (3, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 64, 1), (64, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((1024, 128, 1), (128, 1, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
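Note that the wrapper never materializes a separate aten.cat: buf24 is allocated once as (4, 1088, 64), and buf22/buf23 are reinterpret_tensor views into it at element offsets 0 (the 1024 repeated global-feature channels) and 65536 = 1024 * 64 (the 64 pointfeat channels), so the two producer kernels write the concatenation in place. A sketch of the same write-into-views idea in plain PyTorch (stand-in tensors, not the generated code):

import torch

out = torch.empty(4, 1088, 64)                    # destination of cat([global, pointfeat], dim=1)
out[:, :1024, :].copy_(torch.rand(4, 1024, 64))   # stand-in for the max/repeat kernel's stores
out[:, 1024:, :].copy_(torch.rand(4, 64, 64))     # stand-in for the relu/cat kernel's stores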
import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.parallel import torch.utils.data class PointNetfeat(nn.Module): """ Simple PointNet that extracts point-wise feature by concatenating local and global features. Uses group norm instead of batch norm. """ def __init__(self, input_dim=3, out_size=1024, layer_sizes=[64, 128]): super(PointNetfeat, self).__init__() self.output_size = out_size self.input_dim = input_dim self.conv1 = torch.nn.Conv1d(self.input_dim, layer_sizes[0], 1) self.conv2 = torch.nn.Conv1d(layer_sizes[0], layer_sizes[1], 1) self.conv3 = torch.nn.Conv1d(layer_sizes[1], self.output_size, 1) self.bn1 = nn.GroupNorm(16, layer_sizes[0]) self.bn2 = nn.GroupNorm(16, layer_sizes[1]) self.bn3 = nn.GroupNorm(16, self.output_size) def forward(self, x): n_pts = x.size()[2] x = F.relu(self.bn1(self.conv1(x))) pointfeat = x x = F.relu(self.bn2(self.conv2(x))) x = self.bn3(self.conv3(x)) x = torch.max(x, 2, keepdim=True)[0] x = x.view(-1, self.output_size, 1).repeat(1, 1, n_pts) return torch.cat([x, pointfeat], 1) def get_inputs(): return [torch.rand([4, 3, 64])] def get_init_inputs(): return [[], {}]
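A shape walk-through for the default configuration (CPU sketch): the output has 1088 = out_size (1024 repeated global-max channels) + layer_sizes[0] (64 point-wise channels) per point.

import torch

net = PointNetfeat()               # input_dim=3, out_size=1024, layer_sizes=[64, 128]
x = torch.rand(4, 3, 64)           # 4 clouds, 3 input channels, 64 points
out = net(x)
assert out.shape == (4, 1024 + 64, 64)   # global feature repeated over points, cat'd with pointfeat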
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.parallel import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_convolution_native_group_norm_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r5 = rindex x4 = xindex r3 = rindex // 64 x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + (r5 + 256 * x4), None) tmp1 = tl.load(in_ptr0 + (r3 + 4 * x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 256, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(in_out_ptr0 + (r5 + 256 * x4), tmp2, None) tl.store(out_ptr2 + x4, tmp20, None) tl.store(out_ptr0 + x4, tmp10, None) tl.store(out_ptr1 + x4, tmp15, None) @triton.jit def triton_poi_fused_cat_native_group_norm_relu_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x4 = xindex x5 = xindex // 64 x1 = xindex // 64 % 64 x2 = xindex // 4096 x3 = xindex % 4096 tmp0 = tl.load(in_ptr0 + x4, None) tmp1 = tl.load(in_ptr1 + x5 // 4, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x5 // 4, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 256.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x4, tmp15, None) tl.store(out_ptr1 + (x3 + 69632 * x2), tmp15, None) @triton.jit def triton_per_fused_convolution_native_group_norm_2(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r5 = rindex x4 = xindex r3 = rindex // 64 x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + (r5 + 512 * x4), None) tmp1 = tl.load(in_ptr0 + (r3 + 8 * x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tl.full([1], 512, 
tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 512.0 tmp17 = tmp15 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(in_out_ptr0 + (r5 + 512 * x4), tmp2, None) tl.store(out_ptr2 + x4, tmp20, None) tl.store(out_ptr0 + x4, tmp10, None) tl.store(out_ptr1 + x4, tmp15, None) @triton.jit def triton_poi_fused_native_group_norm_relu_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x4 = xindex // 64 x1 = xindex // 64 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x4 // 8, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x4 // 8, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 512.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_red_fused_convolution_native_group_norm_4(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 64 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x4 = xindex x0 = xindex % 16 tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r5 = rindex r3 = rindex // 64 tmp0 = tl.load(in_out_ptr0 + (r5 + 4096 * x4), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr0 + (r3 + 64 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers. 
welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0) ) tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean) tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2) tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight) tl.store(in_out_ptr0 + (r5 + 4096 * x4), tmp2, rmask & xmask) tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean, tmp4_m2, tmp4_weight, 1) tmp4 = tmp4_tmp[:, None] tmp5 = tmp5_tmp[:, None] tmp6_tmp[:, None] tl.store(out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr1 + x4, tmp5, xmask) tmp7 = 4096.0 tmp8 = tmp5 / tmp7 tmp9 = 1e-05 tmp10 = tmp8 + tmp9 tmp11 = libdevice.rsqrt(tmp10) tl.store(out_ptr2 + x4, tmp11, xmask) @triton.jit def triton_per_fused_max_native_group_norm_repeat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 1024 x1 = xindex // 1024 tmp0 = tl.load(in_ptr0 + (r2 + 64 * x3), None) tmp1 = tl.load(in_ptr1 + x3 // 64, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x3 // 64, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 4096.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = triton_helpers.max2(tmp14, 1)[:, None] tmp18 = tl.broadcast_to(rindex, tmp14.shape) _, tmp17_tmp = triton_helpers.max_with_index(tmp14, tmp18, 1) tmp17 = tmp17_tmp[:, None] tl.store(out_ptr2 + (r2 + 64 * x0 + 69632 * x1), tmp16, None) tl.store(out_ptr1 + x3, tmp17, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 3, 64), (192, 64, 1)) assert_size_stride(primals_2, (64, 3, 1), (3, 1, 1)) assert_size_stride(primals_3, (64,), (1,)) assert_size_stride(primals_4, (64,), (1,)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 1), (64, 1, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128,), (1,)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (1024, 128, 1), (128, 1, 1)) assert_size_stride(primals_11, (1024,), (1,)) assert_size_stride(primals_12, (1024,), (1,)) assert_size_stride(primals_13, (1024,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64), (4096, 64, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.float32 ) buf3 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.float32 ) buf5 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch.float32 ) get_raw_stream(0) triton_per_fused_convolution_native_group_norm_0[grid(64)](buf1, primals_3, buf2, buf3, buf5, 64, 256, num_warps=2, num_stages=1) del primals_3 buf6 = empty_strided_cuda((4, 64, 64), (4096, 64, 1), torch.float32) buf24 = 
empty_strided_cuda((4, 1088, 64), (69632, 64, 1), torch.float32 ) buf23 = reinterpret_tensor(buf24, (4, 64, 64), (69632, 64, 1), 65536) triton_poi_fused_cat_native_group_norm_relu_1[grid(16384)](buf1, buf2, buf3, primals_4, primals_5, buf6, buf23, 16384, XBLOCK= 128, num_warps=4, num_stages=1) del primals_5 buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf7, (4, 128, 64), (8192, 64, 1)) buf8 = buf7 del buf7 buf9 = buf3 del buf3 buf10 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch. float32) buf12 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch. float32) triton_per_fused_convolution_native_group_norm_2[grid(64)](buf8, primals_7, buf9, buf10, buf12, 64, 512, num_warps=4, num_stages=1) del primals_7 buf13 = empty_strided_cuda((4, 128, 64), (8192, 64, 1), torch.float32) triton_poi_fused_native_group_norm_relu_3[grid(32768)](buf8, buf9, buf10, primals_8, primals_9, buf13, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf14, (4, 1024, 64), (65536, 64, 1)) buf15 = buf14 del buf14 buf16 = buf10 del buf10 buf17 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch. float32) buf19 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64), torch. float32) triton_red_fused_convolution_native_group_norm_4[grid(64)](buf15, primals_11, buf16, buf17, buf19, 64, 4096, XBLOCK=1, RBLOCK= 2048, num_warps=16, num_stages=1) del primals_11 buf21 = empty_strided_cuda((4, 1024, 1), (1024, 1, 1), torch.int64) buf22 = reinterpret_tensor(buf24, (4, 1024, 64), (69632, 64, 1), 0) triton_per_fused_max_native_group_norm_repeat_5[grid(4096)](buf15, buf16, buf17, primals_12, primals_13, buf21, buf22, 4096, 64, XBLOCK=32, num_warps=8, num_stages=1) del buf17 del primals_13 return (buf24, primals_1, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, buf1, reinterpret_tensor(buf2, (4, 16), (16, 1), 0), reinterpret_tensor(buf5, (4, 16), (16, 1), 0), buf6, buf8, reinterpret_tensor(buf9, (4, 16), (16, 1), 0), reinterpret_tensor( buf12, (4, 16), (16, 1), 0), buf13, buf15, reinterpret_tensor(buf16, (4, 16), (16, 1), 0), reinterpret_tensor(buf19, (4, 16), (16, 1), 0 ), buf21) class PointNetfeatNew(nn.Module): """ Simple PointNet that extracts point-wise feature by concatenating local and global features. Uses group norm instead of batch norm. 
""" def __init__(self, input_dim=3, out_size=1024, layer_sizes=[64, 128]): super(PointNetfeatNew, self).__init__() self.output_size = out_size self.input_dim = input_dim self.conv1 = torch.nn.Conv1d(self.input_dim, layer_sizes[0], 1) self.conv2 = torch.nn.Conv1d(layer_sizes[0], layer_sizes[1], 1) self.conv3 = torch.nn.Conv1d(layer_sizes[1], self.output_size, 1) self.bn1 = nn.GroupNorm(16, layer_sizes[0]) self.bn2 = nn.GroupNorm(16, layer_sizes[1]) self.bn3 = nn.GroupNorm(16, self.output_size) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_6 = self.conv2.weight primals_7 = self.conv2.bias primals_10 = self.conv3.weight primals_11 = self.conv3.bias primals_4 = self.bn1.weight primals_5 = self.bn1.bias primals_8 = self.bn2.weight primals_9 = self.bn2.bias primals_12 = self.bn3.weight primals_13 = self.bn3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
davrempe/caspr
PointNetfeat
false
15148
[ "MIT" ]
65
a02edb4be11f5ccfe563b2a7869ee8e731e0f8ff
https://github.com/davrempe/caspr/tree/a02edb4be11f5ccfe563b2a7869ee8e731e0f8ff